
Commit

hibernate#49 use Logger instead of System.out.println
mincong-h committed Jun 19, 2016
1 parent 0449388 commit 9a95b24
Showing 12 changed files with 65 additions and 21 deletions.
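
The change applies the same two-step recipe in every file: declare one static final org.jboss.logging.Logger per class, then replace each System.out call with the logger method matching the intended level (debugf for tracing, infof for progress). A minimal before/after sketch of the pattern; the class name ExampleBatchlet is illustrative, not part of the commit:

import org.jboss.logging.Logger;

public class ExampleBatchlet {

    private static final Logger logger = Logger.getLogger(ExampleBatchlet.class);

    public void work(int itemCount) {
        // before: System.out.printf("%d items processed.%n", itemCount);
        // after: the logger handles level filtering and line termination itself
        logger.infof("%d items processed.", itemCount);
    }
}
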
@@ -6,6 +6,8 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

/**
* Decider decides the next step-execution before the start of the index chunk.
* If the user requires an index purge, then the next step should be a purge, else,
@@ -21,14 +23,16 @@ public class AfterPurgeDecider implements Decider {
@Inject @BatchProperty
private Boolean optimizeAfterPurge;

private static final Logger logger = Logger.getLogger(AfterPurgeDecider.class);

/**
* Decide the next step using the target batch property.
*
* @param executions step executions.
*/
@Override
public String decide(StepExecution[] executions) throws Exception {
System.out.printf("AfterPurgeDecide#decide: %s%n", String.valueOf(optimizeAfterPurge));
logger.debugf("optimzeAfterPurge = %b%n", optimizeAfterPurge);
return String.valueOf(optimizeAfterPurge);
}
}
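
One subtlety in decide() worth noting: if the job XML omits the batch property, the injected Boolean may simply stay null, and String.valueOf then returns the exit status "null", which matches neither a "true" nor a "false" transition. A quick illustration of the String.valueOf behavior this relies on:

Boolean optimizeAfterPurge = null;                        // property absent from the job XML
String exitStatus = String.valueOf(optimizeAfterPurge);   // "null", not "false"
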
@@ -7,6 +7,8 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

/**
* Read entity IDs from {@code IndexingContext}. One array is read at a time;
* the number of IDs inside the array depends on the array capacity.
@@ -31,6 +33,8 @@ public class BatchItemReader implements ItemReader {
@Inject
private IndexingContext indexingContext;

private static final Logger logger = Logger.getLogger(BatchItemReader.class);

/**
* The checkpointInfo method returns the current checkpoint data for this
* reader. It is called before a chunk checkpoint is committed.
@@ -40,7 +44,7 @@
*/
@Override
public Serializable checkpointInfo() throws Exception {
// System.out.println("BatchItemReader#checkpointInfo()");
logger.debug("#checkpointInfo()");
return null;
}

@@ -51,7 +55,7 @@ public Serializable checkpointInfo() throws Exception {
*/
@Override
public void close() throws Exception {
System.out.println("BatchItemReader#close()");
logger.debug("#close()");
}

/**
@@ -65,7 +69,7 @@ public void close() throws Exception {
*/
@Override
public void open(Serializable checkpoint) throws Exception {
System.out.printf("BatchItemReader#open(...): entityType = %s%n", entityType);
logger.debugf("#open(...): entityType = %s%n", entityType);
}

/**
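
A practical gain of debugf over the removed printf calls: jboss-logging checks that DEBUG is enabled before applying the format string, so frequent calls such as checkpointInfo() cost almost nothing when the level is off. A sketch of the explicit guard the one-liner replaces, assuming the logger declared above:

// the format runs only when DEBUG is enabled
logger.debugf("#open(...): entityType = %s", entityType);

// roughly equivalent explicit form
if (logger.isDebugEnabled()) {
    logger.debug(String.format("#open(...): entityType = %s", entityType));
}
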
@@ -9,12 +9,16 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

@Named
public class EntityPartitionMapper implements PartitionMapper {

@Inject @BatchProperty(name = "rootEntities")
private String rootEntitiesStr;

private static final Logger logger = Logger.getLogger(EntityPartitionMapper.class);

@Override
public PartitionPlan mapPartitions() throws Exception {

@@ -24,13 +28,13 @@ public PartitionPlan mapPartitions() throws Exception {

@Override
public int getPartitions() {
System.out.printf("#mapPartitions(): %d partitions.%n", rootEntities.length);
logger.infof("%d partitions.%n", rootEntities.length);
return rootEntities.length;
}

@Override
public int getThreads() {
System.out.printf("#getThreads(): %d threads.%n", getPartitions());
logger.infof("%d threads.%n", getPartitions());
return getPartitions();
}

@@ -14,6 +14,7 @@
import org.hibernate.ScrollableResults;
import org.hibernate.Session;
import org.hibernate.criterion.Projections;
import org.jboss.logging.Logger;

/**
* Read identifiers of entities via the entity manager. The result is going to be
@@ -37,6 +38,8 @@ public class IdProducerBatchlet implements Batchlet {
private EntityManager em;
private Session session;

private static final Logger logger = Logger.getLogger(IdProducerBatchlet.class);

/**
* Load the IDs of all target entities using the Hibernate Session. In order to
* follow the ID loading progress, the total number will be additionally
@@ -57,7 +60,7 @@ public String process() throws Exception {
.setProjection(Projections.rowCount())
.setCacheable(false)
.uniqueResult();
System.out.printf("entityType = %s (%d rows).%n", entityType, rowCount);
logger.infof("entityType = %s (%D rows).%n", entityType, rowCount);
indexingContext.addEntityCount(rowCount);

// load ids and store in scrollable results
@@ -4,13 +4,17 @@
import javax.batch.runtime.BatchStatus;
import javax.inject.Named;

import org.jboss.logging.Logger;

@Named
public class IndexPurgerBatchlet implements Batchlet {

private static final Logger logger = Logger.getLogger(IndexPurgerBatchlet.class);

@Override
public String process() throws Exception {

System.out.println("purging entities ...");
logger.info("purging entities ...");

return BatchStatus.COMPLETED.toString();
}
@@ -8,6 +8,7 @@
import javax.inject.Singleton;

import org.hibernate.search.store.IndexShardingStrategy;
import org.jboss.logging.Logger;

/**
* Specific indexing context for the mass indexer. Several attributes are used:
@@ -28,6 +29,7 @@ public class IndexingContext {
private ConcurrentHashMap<Class<?>, ConcurrentLinkedQueue<Serializable[]>> idQueues;
private IndexShardingStrategy indexShardingStrategy;
private long entityCount = 0;
private static final Logger logger = Logger.getLogger(IndexingContext.class);

public void add(Serializable[] clazzIDs, Class<?> clazz) {
idQueues.get(clazz).add(clazzIDs);
@@ -37,7 +39,7 @@ public Serializable[] poll(Class<?> clazz) {
// TODO: this method is really slow
Serializable[] IDs = idQueues.get(clazz).poll();
String len = (IDs == null) ? "null" : String.valueOf(IDs.length);
System.out.printf("Polling %s IDs for %s.%n", len, clazz.getName());
logger.debugf("Polling %d IDs for %s%n", len, clazz.getName());
return IDs;
}

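
The %s conversion in the poll() message above is load-bearing: len is a String ("null" or a number rendered as text), and %d rejects non-integer arguments at format time. The same rule holds for plain String.format:

String len = "null";
String.format("Polling %s IDs", len);   // fine: "Polling null IDs"
String.format("Polling %d IDs", len);   // throws java.util.IllegalFormatConversionException
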
@@ -8,6 +8,8 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

@Named
public class LucenePartitionAnalyzer implements PartitionAnalyzer {

@@ -20,6 +22,8 @@ public class LucenePartitionAnalyzer implements PartitionAnalyzer {
private int workCount = 0;
private float percentage = 0;

private static final Logger logger = Logger.getLogger(LucenePartitionAnalyzer.class);

/**
* Analyze data obtained from different partition plans via partition data
* collectors. The current analysis summarizes their progress:
@@ -45,13 +49,13 @@ public void analyzeCollectorData(Serializable fromCollector) throws Exception {
if (entitiesLoaded != 0) {
percentage = workCount * 100f / entitiesLoaded;
}
System.out.printf("#analyzeCollectorData(): %d works processed (%.1f%%).%n",
logger.infof("#analyzeCollectorData(): %d works processed (%.1f%%).%n",
workCount, percentage);
}

@Override
public void analyzeStatus(BatchStatus batchStatus, String exitStatus)
throws Exception {
System.out.println("#analyzeStatus(...) called.");
logger.debug("#analyzeStatus(...) called.");
}
}
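
The 100f literal in the percentage computation is deliberate: it promotes the division to floating point, whereas the all-int expression would truncate to whole percents (and could overflow for very large work counts). For example:

int workCount = 333, entitiesLoaded = 1000;
float pct = workCount * 100f / entitiesLoaded;     // 33.3
int truncated = workCount * 100 / entitiesLoaded;  // 33
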
@@ -11,6 +11,8 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

/**
* Lucene partition mapper provides a partition plan to the Lucene production
* step: "produceLuceneDoc". The partition plan is defined dynamically,
@@ -40,6 +42,8 @@ public class LucenePartitionMapper implements PartitionMapper {
@Inject @BatchProperty private int threads;
@Inject @BatchProperty(name="rootEntities") private String rootEntitiesStr;

private static final Logger logger = Logger.getLogger(LucenePartitionMapper.class);

@Override
public PartitionPlan mapPartitions() throws Exception {

@@ -60,7 +64,7 @@ public PartitionPlan mapPartitions() throws Exception {
for (int i = 0; i < classPartitions; i++) {
classQueue.add(rootEntity.getName());
}
System.out.printf("%d partitions added to root entity \"%s\".%n",
logger.infof("%d partitions added to root entity \"%s\".%n",
classPartitions, rootEntity);

totalPartitions += classPartitions;
@@ -71,13 +75,13 @@

@Override
public int getPartitions() {
System.out.printf("#mapPartitions(): %d partitions.%n", TOTAL_PARTITIONS);
logger.infof("#mapPartitions(): %d partitions.%n", TOTAL_PARTITIONS);
return TOTAL_PARTITIONS;
}

@Override
public int getThreads() {
System.out.printf("#getThreads(): %d threads.%n", Math.min(TOTAL_PARTITIONS, threads));
logger.infof("#getThreads(): %d threads.%n", Math.min(TOTAL_PARTITIONS, threads));
return Math.min(TOTAL_PARTITIONS, threads);
}

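
getThreads() caps the requested thread count at the partition total, so configuring more threads than partitions in the job XML never allocates idle workers:

int totalPartitions = 3;   // e.g. one partition per root entity
int threads = 8;           // injected batch property
int effective = Math.min(totalPartitions, threads);   // 3
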
@@ -3,28 +3,32 @@
import javax.batch.api.partition.PartitionReducer;
import javax.inject.Named;

import org.jboss.logging.Logger;

@Named
public class LucenePartitionReducer implements PartitionReducer {

private static final Logger logger = Logger.getLogger(LucenePartitionReducer.class);

@Override
public void beginPartitionedStep() throws Exception {
System.out.println("#beginPartitionedStep() called.");
logger.debug("#beginPartitionedStep() called.");
}

@Override
public void beforePartitionedStepCompletion() throws Exception {
System.out.println("#beforePartitionedStepCompletion() called.");
logger.debug("#beforePartitionedStepCompletion() called.");
}

@Override
public void rollbackPartitionedStep() throws Exception {
System.out.println("#rollbackPartitionedStep() called.");
logger.debug("#rollbackPartitionedStep() called.");
}

@Override
public void afterPartitionedStepCompletion(PartitionStatus status)
throws Exception {
System.out.println("#afterPartitionedStepCompletion(...) called.");
logger.debug("#afterPartitionedStepCompletion(...) called.");
}

}
@@ -4,12 +4,16 @@
import javax.batch.runtime.BatchStatus;
import javax.inject.Named;

import org.jboss.logging.Logger;

@Named
public class OptimizerBatchlet implements Batchlet {

private static final Logger logger = Logger.getLogger(OptimizerBatchlet.class);

@Override
public String process() throws Exception {
System.out.println("Optimizing ...");
logger.info("Optimizing ...");
return BatchStatus.COMPLETED.toString();
}

@@ -6,6 +6,8 @@
import javax.inject.Inject;
import javax.inject.Named;

import org.jboss.logging.Logger;

/**
* Decider decides the next step-execution before the start of the index chunk.
* If the user requires an index purge, then the next step should be a purge, else,
@@ -21,14 +23,16 @@ public class PurgeDecider implements Decider {
@Inject @BatchProperty
private Boolean purgeAtStart;

private static final Logger logger = Logger.getLogger(PurgeDecider.class);

/**
* Decide the next step using the target batch property.
*
* @param executions step executions.
*/
@Override
public String decide(StepExecution[] executions) throws Exception {
System.out.printf("PurgeDecider#decide: purgeAtStart=%s.%n", purgeAtStart);
logger.infof("#decide: purgeAtStart=%s.%n", purgeAtStart);
return String.valueOf(purgeAtStart);
}
}
@@ -3,6 +3,7 @@
import org.hibernate.search.store.IndexShardingStrategy;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.junit.Arquillian;
import org.jboss.logging.Logger;
import org.jboss.shrinkwrap.api.ArchivePaths;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
@@ -23,6 +24,8 @@ public class DeploymentTest {
private final int PARTITIONS = 4;
private final int THREADS = 2;

private static final Logger logger = Logger.getLogger(DeploymentTest.class);

@Deployment
public static WebArchive createDeployment() {
WebArchive war = ShrinkWrap.create(WebArchive.class)
@@ -31,7 +34,7 @@ public static WebArchive createDeployment() {
.addPackages(true, "org.hibernate.search.jsr352")
.addAsWebInfResource(EmptyAsset.INSTANCE, ArchivePaths.create("beans.xml"))
.addAsResource("META-INF/batch-jobs/mass-index.xml");
System.out.println(war.toString(true));
logger.info(war.toString(true));
return war;
}

