Commit b2a34fc

get rid of mysql-connector-java due to GPL; get rid of pegdown-doclet due to GPL; fix javadoc errors since standard javadoc processor will now be back in use
1 parent: da55668

20 files changed (+146 additions, -156 deletions)
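
Most of the Javadoc changes below follow directly from dropping pegdown-doclet: the standard doclet parses doc comments as HTML, so literal angle brackets and lambda arrows must be written as HTML entities, and doclint flags missing @param/@return tags. A minimal sketch of that convention, on a hypothetical interface that is not part of this commit:

// Hypothetical interface illustrating the doc-comment conventions the standard
// doclet expects once pegdown-doclet (which accepted raw Markdown) is removed.
public interface EscapingExample {
  /**
   * Uses the given number of threads (usually &gt; 10).  Angle brackets and lambda
   * arrows are written as HTML entities because the standard doclet parses doc
   * comments as HTML; doclint also checks for the @param and @return tags below.
   *
   * <pre>
   * batcher.onUrisReady(batch -&gt; process(batch));
   * </pre>
   *
   * @param threadCount the number of threads to use
   * @return this instance (for method chaining)
   */
  EscapingExample withThreadCount(int threadCount);
}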

pom.xml

Lines changed: 4 additions & 23 deletions

@@ -90,13 +90,6 @@
     <windowtitle>${javadoc-title}</windowtitle>
     <maxmemory>3500m</maxmemory>
     <use/>
-    <doclet>ch.raffael.doclets.pegdown.PegdownDoclet</doclet>
-    <docletArtifact>
-      <groupId>ch.raffael.pegdown-doclet</groupId>
-      <artifactId>pegdown-doclet</artifactId>
-      <version>1.3</version>
-    </docletArtifact>
-    <useStandardDocletOptions>true</useStandardDocletOptions>
   </configuration>
   <executions>
     <execution>
@@ -325,12 +318,6 @@
     <version>1.0</version>
     <scope>provided</scope>
   </dependency>
-  <dependency>
-    <groupId>ch.raffael.pegdown-doclet</groupId>
-    <artifactId>pegdown-doclet</artifactId>
-    <version>1.3</version>
-    <scope>provided</scope>
-  </dependency>
   <dependency>
     <groupId>com.fasterxml.jackson.dataformat</groupId>
     <artifactId>jackson-dataformat-xml</artifactId>
@@ -355,12 +342,6 @@
     <version>4.3.10.RELEASE</version>
     <scope>provided</scope>
   </dependency>
-  <dependency>
-    <groupId>mysql</groupId>
-    <artifactId>mysql-connector-java</artifactId>
-    <version>6.0.6</version>
-    <scope>provided</scope>
-  </dependency>
   <dependency>
     <groupId>org.apache.commons</groupId>
     <artifactId>commons-lang3</artifactId>
@@ -374,10 +355,10 @@
     <scope>provided</scope>
   </dependency>
   <dependency>
-    <groupId>org.hsqldb</groupId>
-    <artifactId>hsqldb</artifactId>
-    <version>2.4.0</version>
-    <scope>test</scope>
+    <groupId>org.hsqldb</groupId>
+    <artifactId>hsqldb</artifactId>
+    <version>2.4.0</version>
+    <scope>test</scope>
   </dependency>
 </dependencies>
</project>

src/main/java/com/marklogic/client/DatabaseClientFactory.java

Lines changed: 2 additions & 2 deletions

@@ -242,9 +242,9 @@ public interface SecurityContext {
    * <br>
    *
    * If we init the sslContext with null TrustManager, it would use the
-   * <java-home>/lib/security/cacerts file for trusted root certificates, if
+   * &lt;java-home&gt;/lib/security/cacerts file for trusted root certificates, if
    * javax.net.ssl.trustStore system property is not set and
-   * <java-home>/lib/security/jssecacerts is not present. See <a href =
+   * &lt;java-home&gt;/lib/security/jssecacerts is not present. See <a href =
    * "http://docs.oracle.com/javase/7/docs/technotes/guides/security/jsse/JSSERefGuide.html">JSSE
    * Reference Guide</a> for more information on SSL and TrustManagers.<br>
    * <br>
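
The doc comment above describes standard JSSE behavior: initializing an SSLContext with null TrustManagers falls back to the default trust store lookup. A minimal sketch using only standard JSSE classes (nothing MarkLogic-specific is assumed):

import javax.net.ssl.SSLContext;

// Sketch of the fallback the doc comment describes: with null TrustManagers, JSSE
// consults javax.net.ssl.trustStore, then <java-home>/lib/security/jssecacerts,
// then <java-home>/lib/security/cacerts for trusted root certificates.
public class DefaultTrustStoreSketch {
  public static void main(String[] args) throws Exception {
    SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
    sslContext.init(null, null, null); // null KeyManagers, null TrustManagers, default SecureRandom
    System.out.println("Initialized " + sslContext.getProtocol() + " with the default trust store");
  }
}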

src/main/java/com/marklogic/client/datamovement/BatchListener.java

Lines changed: 2 additions & 2 deletions

@@ -31,14 +31,14 @@ public interface BatchListener<T extends Batch<?>> {
    *     QueryBatcher qhb = dataMovementManager.newQueryBatcher(query)
    *         .withBatchSize(1000)
    *         .withThreadCount(20)
-   *         .onUrisReady(batch -> {
+   *         .onUrisReady(batch -&gt; {
    *             for ( String uri : batch.getItems() ) {
    *                 if ( uri.endsWith(".txt") ) {
    *                     batch.getClient().newDocumentManager().delete(uri);
    *                 }
    *             }
    *         })
-   *         .onQueryFailure(queryBatchException -> queryBatchException.printStackTrace());
+   *         .onQueryFailure(queryBatchException -&gt; queryBatchException.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(qhb);
    *     qhb.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);

src/main/java/com/marklogic/client/datamovement/Batcher.java

Lines changed: 5 additions & 1 deletion

@@ -63,7 +63,7 @@ public interface Batcher {

   /**
    * The number of threads to be used internally by this job to perform
-   * concurrent tasks on batches (usually > 10). With some experimentation with your custom
+   * concurrent tasks on batches (usually &gt; 10). With some experimentation with your custom
    * job and client environment, this value can be tuned. Tuning this value is
    * one of the best ways to achieve optimal throughput or to throttle the
    * server resources used by this job. Setting this to 1 does not guarantee
@@ -72,6 +72,8 @@ public interface Batcher {
    *
    * This method cannot be called after the job has started.
    *
+   * @param threadCount the number of threads to use in this Batcher
+   *
    * @return this instance (for method chaining)
    */
   Batcher withThreadCount(int threadCount);
@@ -93,6 +95,8 @@ public interface Batcher {
    * will come from {@link DataMovementManager#readForestConfig}, perhaps wrapped by
    * something like {@link FilteredForestConfiguration}.
    *
+   * @param forestConfig the updated list of forests with their hosts, etc.
+   *
    * @return this instance (for method chaining)
    */
   Batcher withForestConfig(ForestConfiguration forestConfig);

src/main/java/com/marklogic/client/datamovement/DeleteListener.java

Lines changed: 1 addition & 1 deletion

@@ -43,7 +43,7 @@
    *     deleteBatcher.awaitCompletion();
    *     moveMgr.stopJob(ticket);
    *
-   * With Iterator<String>, pass a DeleteListener instance to
+   * With Iterator&lt;String&gt;, pass a DeleteListener instance to
    * QueryBatcher onUrisReady like so:
    *
    *     QueryBatcher deleteBatcher = moveMgr.newQueryBatcher(query)

src/main/java/com/marklogic/client/datamovement/ExportListener.java

Lines changed: 2 additions & 2 deletions

@@ -48,11 +48,11 @@
    *         .onUrisReady(
    *           new ExportListener()
    *             .withConsistentSnapshot()
-   *             .onDocumentReady(doc -> {
+   *             .onDocumentReady(doc -&gt; {
    *               logger.debug("Contents=[{}]", doc.getContentAs(String.class));
    *             })
    *         )
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *
    *     JobTicket ticket = moveMgr.startJob(exportBatcher);
    *     exportBatcher.awaitCompletion();

src/main/java/com/marklogic/client/datamovement/QueryBatchListener.java

Lines changed: 4 additions & 2 deletions

@@ -31,14 +31,14 @@ public interface QueryBatchListener extends BatchListener<QueryBatch> {
    *     QueryBatcher qhb = dataMovementManager.newQueryBatcher(query)
    *         .withBatchSize(1000)
    *         .withThreadCount(20)
-   *         .onUrisReady(batch -> {
+   *         .onUrisReady(batch -&gt; {
    *             for ( String uri : batch.getItems() ) {
    *                 if ( uri.endsWith(".txt") ) {
    *                     batch.getClient().newDocumentManager().delete(uri);
    *                 }
    *             }
    *         })
-   *         .onQueryFailure(queryBatchException -> queryBatchException.printStackTrace());
+   *         .onQueryFailure(queryBatchException -&gt; queryBatchException.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(qhb);
    *     qhb.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);
@@ -50,6 +50,8 @@ public interface QueryBatchListener extends BatchListener<QueryBatch> {
   /**
    * This default method should be implemented by custom listeners that should
    * be retried in case of failover.
+   *
+   * @param queryBatcher the QueryBatcher which will call this Listener
    */
   default void initializeListener(QueryBatcher queryBatcher) {}
 }
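
The initializeListener hook documented above is where a custom listener can capture per-batcher state so it can be retried after failover. A hypothetical listener, not part of this commit, assuming the 4.x API in which processEvent(QueryBatch) is the listener's single abstract method:

import com.marklogic.client.datamovement.QueryBatch;
import com.marklogic.client.datamovement.QueryBatchListener;
import com.marklogic.client.datamovement.QueryBatcher;

// Hypothetical listener sketch; assumes processEvent(QueryBatch) is the abstract
// method and batch.getItems() returns the batch's URIs, as the javadoc examples show.
public class UriLoggingListener implements QueryBatchListener {
  @Override
  public void initializeListener(QueryBatcher queryBatcher) {
    // capture any per-batcher state needed to reprocess batches after failover
    System.out.println("listener initialized for " + queryBatcher);
  }

  @Override
  public void processEvent(QueryBatch batch) {
    for (String uri : batch.getItems()) {
      System.out.println("uri ready: " + uri);
    }
  }
}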

src/main/java/com/marklogic/client/datamovement/QueryBatcher.java

Lines changed: 7 additions & 7 deletions

@@ -42,7 +42,7 @@
    *     QueryBatcher qhb = dataMovementManager.newQueryBatcher(query)
    *         .withConsistentSnapshot()
    *         .onUrisReady( new DeleteListener() )
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(qhb);
    *     qhb.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);
@@ -78,33 +78,33 @@
    *         .withBatchSize(1000)
    *         .withThreadCount(20)
    *         .withConsistentSnapshot()
-   *         .onUrisReady(batch -> {
+   *         .onUrisReady(batch -&gt; {
    *             for ( String uri : batch.getItems() ) {
    *                 if ( uri.endsWith(".txt") ) {
    *                     client.newDocumentManager().delete(uri);
    *                 }
    *             }
    *         })
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(qhb);
    *     qhb.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);
    *
    * Example of queueing uris in memory instead of using withConsistentSnapshot():
    *
-   *     ArrayList<String> uris = Collections.synchronizedList(new ArrayList<>());
+   *     ArrayList&lt;String&gt; uris = Collections.synchronizedList(new ArrayList&lt;&gt;());
    *     QueryBatcher getUris = dataMovementManager.newQueryBatcher(query)
    *         .withBatchSize(5000)
-   *         .onUrisReady( batch -> uris.addAll(Arrays.asList(batch.getItems())) )
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onUrisReady( batch -&gt; uris.addAll(Arrays.asList(batch.getItems())) )
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket getUrisTicket = dataMovementManager.startJob(getUris);
    *     getUris.awaitCompletion();
    *     dataMovementManager.stopJob(getUrisTicket);
    *
    *     // now we have the uris, let's step through them
    *     QueryBatcher performDelete = moveMgr.newQueryBatcher(uris.iterator())
    *         .onUrisReady(new DeleteListener())
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(performDelete);
    *     performDelete.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);
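
Rendered back from the entity-escaped javadoc, the queue-in-memory example reads as the plain Java below. This is a sketch only: it assumes the dataMovementManager, moveMgr, and query variables from the javadoc, and declares the list as List<String> because Collections.synchronizedList returns a List rather than an ArrayList.

// Plain-Java form of the escaped example above (uses java.util.* plus the
// com.marklogic.client.datamovement types named in the javadoc).
List<String> uris = Collections.synchronizedList(new ArrayList<>());
QueryBatcher getUris = dataMovementManager.newQueryBatcher(query)
    .withBatchSize(5000)
    .onUrisReady(batch -> uris.addAll(Arrays.asList(batch.getItems())))
    .onQueryFailure(exception -> exception.printStackTrace());
JobTicket getUrisTicket = dataMovementManager.startJob(getUris);
getUris.awaitCompletion();
dataMovementManager.stopJob(getUrisTicket);

// now that the uris are queued in memory, step through them with a second batcher
QueryBatcher performDelete = moveMgr.newQueryBatcher(uris.iterator())
    .onUrisReady(new DeleteListener())
    .onQueryFailure(exception -> exception.printStackTrace());
JobTicket ticket = dataMovementManager.startJob(performDelete);
performDelete.awaitCompletion();
dataMovementManager.stopJob(ticket);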

src/main/java/com/marklogic/client/datamovement/QueryEvent.java

Lines changed: 2 additions & 0 deletions

@@ -27,6 +27,8 @@ public interface QueryEvent {
   /** A reference to the batcher--useful for modifying the
    * {@link Batcher#withForestConfig ForestConfiguration}
    * or calling {@link DataMovementManager#stopJob(Batcher)} if needed.
+   *
+   * @return the QueryBatcher that created this event
    */
   QueryBatcher getBatcher();

src/main/java/com/marklogic/client/datamovement/UrisToWriterListener.java

Lines changed: 2 additions & 2 deletions

@@ -35,7 +35,7 @@
    *     QueryBatcher getUris = dataMovementManager.newQueryBatcher(query)
    *         .withBatchSize(5000)
    *         .onUrisReady( new UrisToWriterListener(writer) )
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket getUrisTicket = dataMovementManager.startJob(getUris);
    *     getUris.awaitCompletion();
    *     dataMovementManager.stopJob(getUrisTicket);
@@ -46,7 +46,7 @@
    *     BufferedReader reader = new BufferedReader(new FileReader("uriCache.txt"));
    *     QueryBatcher performDelete = dataMovementManager.newQueryBatcher(reader.lines().iterator())
    *         .onUrisReady(new DeleteListener())
-   *         .onQueryFailure(exception -> exception.printStackTrace());
+   *         .onQueryFailure(exception -&gt; exception.printStackTrace());
    *     JobTicket ticket = dataMovementManager.startJob(performDelete);
    *     performDelete.awaitCompletion();
    *     dataMovementManager.stopJob(ticket);
