Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix(interactive): persist latest kafka queue offset before stopped #3800

Merged
merged 2 commits into from
Jun 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions charts/graphscope-store/templates/configmap.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ data:

gaia.rpc.port=60000
gaia.engine.port=60001
gaia.write.timeout.ms={{ .Values.pegasus.timeout }}
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. This is not consistent with the overall timeout setting in GIE, which is {.Values.query.execution.timeout.ms};
  2. Users are also allowed to set a timeout per query, while the timeout for the TCP writer seems like it can only be set when the server starts. Consider this case.


## Secondary config
secondary.instance.enabled={{ .Values.secondary.enabled }}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,9 +58,6 @@ public class CommonConfig {

public static final Config<Integer> PARTITION_COUNT = Config.intConfig("partition.count", 1);

public static final Config<Long> METRIC_UPDATE_INTERVAL_MS =
Config.longConfig("metric.update.interval.ms", 5000L);

public static final Config<String> LOG4RS_CONFIG = Config.stringConfig("log4rs.config", "");

public static final Config<String> DISCOVERY_MODE =
Expand All @@ -74,8 +71,6 @@ public class CommonConfig {

public static final Config<Boolean> SECONDARY_INSTANCE_ENABLED =
Config.boolConfig("secondary.instance.enabled", false);
public static final Config<Boolean> TRACING_ENABLED =
Config.boolConfig("tracing.enabled", false);

// Create an extra store pod for each original store pod for backup.
// Only available in multi pod mode.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ public class CoordinatorConfig {
Config.boolConfig("log.recycle.enable", false);

public static final Config<Long> LOG_RECYCLE_INTERVAL_SECOND =
Config.longConfig("log.recycle.interval.second", 600L);
Config.longConfig("log.recycle.interval.second", 3600L);

public static final Config<String> FILE_META_STORE_PATH =
Config.stringConfig("file.meta.store.path", "./meta");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,17 +29,14 @@ public class StoreConfig {
public static final Config<Long> STORE_QUEUE_WAIT_MS =
Config.longConfig("store.queue.wait.ms", 3000L);

public static final Config<Long> STORE_COMMIT_INTERVAL_MS =
Config.longConfig("store.commit.interval.ms", 1000L);

public static final Config<Boolean> STORE_GC_ENABLE =
Config.boolConfig("store.gc.enable", true);

public static final Config<Long> STORE_GC_INTERVAL_MS =
Config.longConfig("store.gc.interval.ms", 5000L);
Config.longConfig("store.gc.interval.ms", 3600000L);

public static final Config<Long> STORE_CATCHUP_INTERVAL_MS =
Config.longConfig("store.catchup.interval.ms", 30000L);
Config.longConfig("store.catchup.interval.ms", 5000L);

// set by IS_SECONDARY_INSTANCE, used in graph.rs
public static final Config<String> STORE_STORAGE_ENGINE =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::time::Duration;

use crate::config::BlockMode;
use crate::receive::start_net_receiver;
use crate::send::start_net_sender;
use crate::transport::ConnectionParams;
Expand Down Expand Up @@ -55,6 +56,14 @@ pub fn listen_on<A: ToSocketAddrs>(
let remote = Server { id: remote_id, addr };
if params.is_nonblocking {
stream.set_nonblocking(true).ok();
} else {
if let BlockMode::Blocking(Some(write_timelout)) =
params.get_write_params().mode
{
stream
.set_write_timeout(Some(write_timelout))
.ok();
}
}
let recv_poisoned = Arc::new(AtomicBool::new(false));
start_net_sender(
Expand Down Expand Up @@ -127,6 +136,11 @@ pub fn connect(
let remote = Server { id: remote_id, addr };
if params.is_nonblocking {
conn.set_nonblocking(true).ok();
} else {
if let BlockMode::Blocking(Some(write_timelout)) = params.get_write_params().mode {
conn.set_write_timeout(Some(write_timelout))
.ok();
}
}
let read_half = conn
.try_clone()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ private void doRecycle() {
List<Long> queueOffsets = this.snapshotManager.getQueueOffsets();
for (int i = 0; i < queueOffsets.size(); i++) {
long offset = queueOffsets.get(i);
offset = Math.max(offset - 3600, 0); // Leave some spaces
try {
logService.deleteBeforeOffset(i, offset);
logger.info("recycled queue [{}] offset [{}]", i, offset);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,11 @@ public void start() {

public void stop() {
this.shouldStop = true;
try {
updateQueueOffsets();
} catch (IOException ex) {
logger.error("update queue offset failed", ex);
}
if (this.persistOffsetsScheduler != null) {
this.persistOffsetsScheduler.shutdown();
try {
Expand Down Expand Up @@ -163,8 +168,7 @@ private void updateQueueOffsets() throws IOException {
boolean changed = false;
List<Long> consumedOffsets = writerAgent.getConsumedQueueOffsets();
for (int qId = 0; qId < queueOffsets.size(); qId++) {
long minOffset = Long.MAX_VALUE;
minOffset = Math.min(consumedOffsets.get(qId), minOffset);
long minOffset = Math.min(consumedOffsets.get(qId), Long.MAX_VALUE);
if (minOffset != Long.MAX_VALUE && minOffset > newQueueOffsets.get(qId)) {
newQueueOffsets.set(qId, minOffset);
changed = true;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@ public class PartitionService {
public PartitionService(Configs configs, StoreService storeService) {
this.storeService = storeService;
this.isSecondary = CommonConfig.SECONDARY_INSTANCE_ENABLED.get(configs);
this.storeCatchupIntervalMS = StoreConfig.STORE_GC_INTERVAL_MS.get(configs);
// this.storeCatchupIntervalMS = StoreConfig.STORE_CATCHUP_INTERVAL_MS.get(configs);
this.storeCatchupIntervalMS = StoreConfig.STORE_CATCHUP_INTERVAL_MS.get(configs);

this.scheduler =
Executors.newScheduledThreadPool(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -167,8 +167,8 @@ private void processBatches() {
this.consumeSI = batchSI;
this.availSnapshotInfoRef.set(new SnapshotInfo(availSI, availDdlSI));
this.commitExecutor.execute(this::asyncCommit);
} else {
logger.warn("consumedSI {} >= batchSI {}, ignored", consumeSI, batchSI);
} else { // a flurry of batches with same snapshot ID
logger.debug("consumedSI {} >= batchSI {}, ignored", consumeSI, batchSI);
}
if (hasDdl) {
this.consumeDdlSnapshotId = batchSI;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@

import com.alibaba.graphscope.groot.common.config.CommonConfig;
import com.alibaba.graphscope.groot.common.config.Configs;
import com.alibaba.graphscope.groot.common.config.StoreConfig;
import com.alibaba.graphscope.groot.meta.MetaService;
import com.alibaba.graphscope.groot.operation.StoreDataBatch;
import com.alibaba.graphscope.groot.rpc.RoleClients;
Expand All @@ -34,11 +33,7 @@ public class WriterAgentTest {

@Test
void testWriterAgent() throws InterruptedException, ExecutionException {
Configs configs =
Configs.newBuilder()
.put(CommonConfig.NODE_IDX.getKey(), "0")
.put(StoreConfig.STORE_COMMIT_INTERVAL_MS.getKey(), "10")
.build();
Configs configs = Configs.newBuilder().put(CommonConfig.NODE_IDX.getKey(), "0").build();
StoreService mockStoreService = mock(StoreService.class);

MetaService mockMetaService = mock(MetaService.class);
Expand Down
Loading