
[GOBBLIN-284] Add retry in SalesforceExtractor to handle transient ne… #2137

Closed · wants to merge 3 commits

Changes from 1 commit

@@ -108,6 +108,8 @@ public class SalesforceExtractor extends RestApiExtractor {
private static final int MAX_RETRY_INTERVAL_SECS = 600;
// avoid using too many bulk API calls by allowing PK chunking only if max partitions is configured <= this
private static final int PK_CHUNKING_MAX_PARTITIONS_LIMIT = 3;
private static final String FETCH_RETRY_LIMIT_KEY = "salesforce.fetchRetryLimit";
private static final int DEFAULT_FETCH_RETRY_LIMIT = 5;

private boolean pullStatus = true;
private String nextUrl;
@@ -124,10 +126,13 @@ public class SalesforceExtractor extends RestApiExtractor {
private boolean newBulkResultSet = true;
private int bulkRecordCount = 0;
private int prevBulkRecordCount = 0;
private List<String> csvRecord;

private final boolean pkChunking;
private final int pkChunkingSize;
private final SalesforceConnector sfConnector;
private final int fetchRetryLimit;
private final int batchSize;

public SalesforceExtractor(WorkUnitState state) {
super(state);
@@ -149,6 +154,13 @@ public SalesforceExtractor(WorkUnitState state) {
this.pkChunkingSize =
Math.max(MIN_PK_CHUNKING_SIZE,
Math.min(MAX_PK_CHUNKING_SIZE, state.getPropAsInt(PK_CHUNKING_SIZE_KEY, DEFAULT_PK_CHUNKING_SIZE)));

// Get batch size from .pull file
int tmpBatchSize = state.getPropAsInt(ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE,
ConfigurationKeys.DEFAULT_SOURCE_FETCH_SIZE);

this.batchSize = tmpBatchSize == 0 ? ConfigurationKeys.DEFAULT_SOURCE_FETCH_SIZE : tmpBatchSize;
this.fetchRetryLimit = state.getPropAsInt(FETCH_RETRY_LIMIT_KEY, DEFAULT_FETCH_RETRY_LIMIT);
}

@Override
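The constructor above wires up two settings: the extractor batch size and the new fetch retry limit. A minimal, hypothetical sketch of how a job might supply them is below; only "salesforce.fetchRetryLimit" is introduced by this PR, and the batch-size key name is assumed to be the value of ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE ("source.querybased.fetch.size"). The class name is illustrative only.

import java.util.Properties;

public class SalesforceRetryConfigSketch {
  public static void main(String[] args) {
    // Hypothetical job properties; keys are plain strings, as in a Gobblin .pull file.
    Properties jobProps = new Properties();
    // New knob from this PR (FETCH_RETRY_LIMIT_KEY); overrides DEFAULT_FETCH_RETRY_LIMIT (5).
    jobProps.setProperty("salesforce.fetchRetryLimit", "3");
    // Extractor batch size; key name assumed from ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE.
    // As in the constructor above, a value of 0 falls back to DEFAULT_SOURCE_FETCH_SIZE.
    jobProps.setProperty("source.querybased.fetch.size", "2000");
    System.out.println(jobProps);
  }
}
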
@@ -581,9 +593,12 @@ public Iterator<JsonElement> getRecordSetFromSourceApi(String schema, String ent

// Get data from input stream
// If bulk load is not finished, get data from the stream
if (!this.isBulkJobFinished()) {
rs = getBulkData();
}
// Skip empty result sets since they will cause the extractor to terminate early
Contributor: Is it better to do the skip loop in getBulkData?

Contributor (Author): getBulkData has a couple of places where it returns, so I thought that this was cleaner and less error prone.

do {
if (!this.isBulkJobFinished()) {
rs = getBulkData();
}
} while (rs != null && rs.isEmpty() && !this.isBulkJobFinished());
Contributor (suggesting an alternative):

while ((rs == null || rs.isEmpty()) && !this.isBulkJobFinished()) {
  rs = getBulkData();
}


// Set bulkApiInitialRun to false after the completion of first run
this.bulkApiInitialRun = false;
@@ -774,6 +789,122 @@ private List<BatchIdAndResultId> getQueryResultIds(String entity, List<Predicate
}
}

/**
* Get a buffered reader wrapping the query result stream for the result with the specified index
* @param index index into the {@link #bulkResultIdList}
* @return a {@link BufferedReader}
* @throws AsyncApiException
*/
private BufferedReader getBulkBufferedReader(int index) throws AsyncApiException {
return new BufferedReader(new InputStreamReader(
this.bulkConnection.getQueryResultStream(this.bulkJob.getId(), this.bulkResultIdList.get(index).getBatchId(),
this.bulkResultIdList.get(index).getResultId()), ConfigurationKeys.DEFAULT_CHARSET_ENCODING));
}

/**
* Fetch a batch of records
Contributor: It's better to make it clearer that this batch isn't the chunk batch, but rather a grouping of records done by the extractor.

* @param rs the record set to fetch into
* @param initialRecordCount Initial record count to use. This should correspond to the number of records already in rs
* @throws DataRecordException
* @throws IOException
*/
private void fetchResultBatch(RecordSetList<JsonElement> rs, int initialRecordCount)
throws DataRecordException, IOException {
int recordCount = initialRecordCount;

// Stream the resultset through CSV reader to identify columns in each record
InputStreamCSVReader reader = new InputStreamCSVReader(this.bulkBufferedReader);

// Get header if it is first run of a new resultset
if (this.isNewBulkResultSet()) {
this.bulkRecordHeader = reader.nextRecord();
this.bulkResultColumCount = this.bulkRecordHeader.size();
this.setNewBulkResultSet(false);
}

// Get record from CSV reader stream
while ((this.csvRecord = reader.nextRecord()) != null) {
// Convert CSV record to JsonObject
JsonObject jsonObject = Utils.csvToJsonObject(this.bulkRecordHeader, this.csvRecord, this.bulkResultColumCount);
rs.add(jsonObject);
recordCount++;
this.bulkRecordCount++;

// Insert records in record set until it reaches the batch size
if (recordCount >= batchSize) {
Contributor: The while loop already has a check; is it necessary to have this extra check?

Contributor (Author): This is the existing logic moved into a new method. The check here is required to chunk the stream into batches, since the while loop only checks for end of stream. The check could be moved into the while condition, but I think it is here for the extra logging when the condition is true. (A short illustration of this batching follows the method below.)

log.info("Total number of records processed so far: " + this.bulkRecordCount);
break;
}
}
}
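
As discussed in the exchange above, fetchResultBatch() stops after adding batchSize records, while its while loop only detects end of stream, so a single bulk result set is handed out as several extractor batches. A minimal sketch of that behavior with hypothetical numbers, not taken from the PR:

import java.util.ArrayList;
import java.util.List;

public class ExtractorBatchingSketch {
  public static void main(String[] args) {
    int batchSize = 2000;           // the fetch size resolved in the constructor
    int recordsInResultSet = 5000;  // hypothetical size of one bulk query result set
    List<Integer> extractorBatches = new ArrayList<>();
    for (int remaining = recordsInResultSet; remaining > 0; remaining -= batchSize) {
      // each fetchResultBatch() call stops once it has added batchSize records to the record set
      extractorBatches.add(Math.min(batchSize, remaining));
    }
    // prints [2000, 2000, 1000]: three getBulkData() calls drain one result set
    System.out.println(extractorBatches);
  }
}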

/**
* Reinitialize the state of {@link #bulkBufferedReader} to handle network disconnects
* @throws IOException
* @throws AsyncApiException
*/
private void reinitializeBufferedReader() throws IOException, AsyncApiException {
// close reader and get a new input stream to reconnect to resolve intermittent network errors
this.bulkBufferedReader.close();
this.bulkBufferedReader = getBulkBufferedReader(this.bulkResultIdCount - 1);

// if the result set is partially processed then we need to skip over processed records
if (!isNewBulkResultSet()) {
List<String> lastCsvRecord = null;
InputStreamCSVReader reader = new InputStreamCSVReader(this.bulkBufferedReader);

// skip header
reader.nextRecord();

int recordsToSkip = this.bulkRecordCount - this.prevBulkRecordCount;
Contributor: If batches are processed one after the other, isn't it enough to clear the result set instead of skipping?

Contributor (Author): No, there are two reasons for this:

  1. Clearing and recreating records would be more expensive than skipping.
  2. The reset moves back to the beginning of the result set. There can be many 2K-record batches in a result set, and all except the last batch have already been processed and given out of the extractor, so we need to skip over at least all batches prior to the last one.

(A worked example of the skip arithmetic follows this method.)

log.info("Skipping {} records on retry: ", recordsToSkip);

for (int i = 0; i < recordsToSkip; i++) {
lastCsvRecord = reader.nextRecord();
}

// make sure the last record processed before the error was the last record skipped so that the next
// unprocessed record is processed in the next call to fetchResultBatch()
if (recordsToSkip > 0) {
if (!this.csvRecord.equals(lastCsvRecord)) {
throw new RuntimeException("Repositioning after reconnecting did not point to the expected record");
}
}
}
}
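
Following the skip-versus-clear discussion above: prevBulkRecordCount is the running record total captured when the current result set was opened, so the difference computed in reinitializeBufferedReader() is exactly the number of records from this result set already emitted before the failure. A worked sketch with hypothetical counts, not from the PR:

public class SkipOnRetrySketch {
  public static void main(String[] args) {
    int prevBulkRecordCount = 4000; // running total when the current result set was opened
    int bulkRecordCount = 5500;     // running total when the network error occurred
    int recordsToSkip = bulkRecordCount - prevBulkRecordCount;
    // 1500 records of the re-opened result set are skipped after reconnecting; the last skipped
    // CSV record is then compared with the last record processed before the error to verify
    // that the reader is positioned at the next unprocessed record.
    System.out.println("Records to skip on retry: " + recordsToSkip);
  }
}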

/**
* Fetch a result batch with retry for network errors
* @param rs the {@link RecordSetList} to fetch into
*/
private void fetchResultBatchWithRetry(RecordSetList<JsonElement> rs)
throws AsyncApiException, DataRecordException, IOException {
boolean success = false;
int retryCount = 0;
int recordCountBeforeFetch = this.bulkRecordCount;

do {
try {
// reinitialize the reader to establish a new connection to handle transient network errors
if (retryCount > 0) {
reinitializeBufferedReader();
}

// on retries there may already be records in rs, so pass the number of records as the initial count
fetchResultBatch(rs, this.bulkRecordCount - recordCountBeforeFetch);
success = true;
} catch (IOException e) {
if (retryCount < this.fetchRetryLimit) {
log.info("Exception while fetching data, retrying: " + e.getMessage(), e);
retryCount++;
} else {
log.error("Exception while fetching data: " + e.getMessage(), e);
throw e;
}
}
} while (!success);
}

/**
* Get data from the bulk api input stream
* @return record set with each record as a JsonObject
@@ -796,14 +927,12 @@ private RecordSet<JsonElement> getBulkData() throws DataRecordException {
if (this.bulkResultIdCount < this.bulkResultIdList.size()) {
log.info("Stream resultset for resultId:" + this.bulkResultIdList.get(this.bulkResultIdCount));
this.setNewBulkResultSet(true);
this.bulkBufferedReader =
new BufferedReader(
new InputStreamReader(
this.bulkConnection.getQueryResultStream(this.bulkJob.getId(),
this.bulkResultIdList.get(this.bulkResultIdCount).getBatchId(),
this.bulkResultIdList.get(this.bulkResultIdCount).getResultId()),
ConfigurationKeys.DEFAULT_CHARSET_ENCODING));

if (this.bulkBufferedReader != null) {
this.bulkBufferedReader.close();
}

this.bulkBufferedReader = getBulkBufferedReader(this.bulkResultIdCount);
this.bulkResultIdCount++;
this.prevBulkRecordCount = bulkRecordCount;
} else {
@@ -814,41 +943,8 @@ private RecordSet<JsonElement> getBulkData() throws DataRecordException {
}
}

// if Buffer stream has data then process the same

// Get batch size from .pull file
int batchSize = Utils.getAsInt(this.workUnitState.getProp(ConfigurationKeys.SOURCE_QUERYBASED_FETCH_SIZE));
if (batchSize == 0) {
batchSize = ConfigurationKeys.DEFAULT_SOURCE_FETCH_SIZE;
}

// Stream the resultset through CSV reader to identify columns in each record
InputStreamCSVReader reader = new InputStreamCSVReader(this.bulkBufferedReader);

// Get header if it is first run of a new resultset
if (this.isNewBulkResultSet()) {
this.bulkRecordHeader = reader.nextRecord();
this.bulkResultColumCount = this.bulkRecordHeader.size();
this.setNewBulkResultSet(false);
}

List<String> csvRecord;
int recordCount = 0;

// Get record from CSV reader stream
while ((csvRecord = reader.nextRecord()) != null) {
// Convert CSV record to JsonObject
JsonObject jsonObject = Utils.csvToJsonObject(this.bulkRecordHeader, csvRecord, this.bulkResultColumCount);
rs.add(jsonObject);
recordCount++;
this.bulkRecordCount++;

// Insert records in record set until it reaches the batch size
if (recordCount >= batchSize) {
log.info("Total number of records processed so far: " + this.bulkRecordCount);
break;
}
}
// fetch a batch of results with retry for network errors
fetchResultBatchWithRetry(rs);

} catch (Exception e) {
throw new DataRecordException("Failed to get records from salesforce; error - " + e.getMessage(), e);