Pipe: Double-living Semantic Correction to Forward Events from Cluster A to B by Default Unless They Originate from B#15112
XNX02 wants to merge 32 commits into apache:master from
Conversation
| private AbstractDeleteDataNode deleteDataNode; | ||
| private DeletionResource deletionResource; | ||
| private boolean isGeneratedByPipe; | ||
| private String originClusterId; |
| ByteBuffer.allocate( | ||
| Byte.BYTES + planBuffer.limit() + computeOriginClusterIdBufferSize(originClusterId)); | ||
| ReadWriteIOUtils.write(isGeneratedByPipe, result); | ||
| ReadWriteIOUtils.write(originClusterId, result); |
| if (byteBuffer.hasRemaining()) { | ||
| boolean hasClusterId = byteBuffer.get() != 0; | ||
| if (hasClusterId) { | ||
| int strLength = byteBuffer.getShort(); | ||
| byte[] bytes = new byte[strLength]; | ||
| byteBuffer.get(bytes); | ||
| originClusterId = new String(bytes); | ||
| } else { | ||
| originClusterId = null; | ||
| } | ||
| } |
| : insertTabletNode.getOriginClusterId(); | ||
| if (Objects.isNull(workMemTable.getCurrentOriginClusterId())) { | ||
| workMemTable.setCurrentOriginClusterId(originClusterId); | ||
| } else if (!Objects.equals(originClusterId, workMemTable.getCurrentOriginClusterId())) { |
There was a problem hiding this comment.
originClusterId != workMemTable.getCurrentOriginClusterId()
There was a problem hiding this comment.
Make a static final cache map in the receiver and get the id string from the map
map <id, id> putIfAbsent
There was a problem hiding this comment.
Add a comment here if reference comparison is being used.
| } | ||
| final String originClusterId = | ||
| insertTabletNode.getOriginClusterId() == null | ||
| ? config.getClusterId() |
| insertTabletNode.getOriginClusterId() == null | ||
| ? config.getClusterId() | ||
| : insertTabletNode.getOriginClusterId(); | ||
| if (Objects.isNull(workMemTable.getCurrentOriginClusterId())) { |
|
|
||
| @Override | ||
| public String toString() { | ||
| return "PipeEnrichedPlanV2{" + "innerPlan='" + innerPlan + "'}"; |
There was a problem hiding this comment.
Better add "originClusterId" here
| */ | ||
| public TSStatus operatePermission(AuthorPlan authorPlan, boolean isGeneratedByPipe) { | ||
| public TSStatus operatePermission( | ||
| AuthorPlan authorPlan, boolean isGeneratedByPipe, String originClusterIds) { |
There was a problem hiding this comment.
originClusterId... Same in other files
| @@ -87,6 +87,19 @@ public AuthOperationProcedure( | |||
| this.timeoutMS = commonConfig.getDatanodeTokenTimeoutMS(); | |||
| } | |||
There was a problem hiding this comment.
May remove the unused constructors... Same in other files
| public void pipeEnrichedPlanTest() throws IOException { | ||
| final PipeEnrichedPlan plan = | ||
| new PipeEnrichedPlan( | ||
| final PipeEnrichedPlanV1 plan = |
There was a problem hiding this comment.
Better use "V2".. Same in other tests
| isGeneratedByPipe = ReadWriteIOUtils.readBool(buffer); | ||
| configPhysicalPlan = ConfigPhysicalPlan.Factory.create(buffer); | ||
|
|
||
| // There might be an ignoredChildrenSize 0 |
There was a problem hiding this comment.
The size may not appear on configNodes...
| } | ||
| } | ||
|
|
||
| public Set<String> getSinkClusterIds() { |
| } | ||
|
|
||
| public void setSinkClusterIds(Set<String> sinkClusterIds) { | ||
| realtimeExtractor.setSinkClusterIds(sinkClusterIds); |
There was a problem hiding this comment.
Personally I think it's better to put this in IoTDBExtractor...
| @Override | ||
| public RegionExecutionResult visitPipeEnrichedWritePlanNode( | ||
| final PipeEnrichedWritePlanNode node, final WritePlanNodeExecutionContext context) { | ||
| node.setOriginClusterId(node.getOriginClusterId()); |
There was a problem hiding this comment.
Seemingly some extra operations are needed to pass the "originClusterId" to the state machine....
| 1: required list<common.TConsensusGroupId> schemaRegionIdList | ||
| 2: required binary pathPatternTree | ||
| 3: optional bool isGeneratedByPipe | ||
| 4: optional string originCluster |
| struct TPipeTransferResp { | ||
| 1:required common.TSStatus status | ||
| 2:optional binary body | ||
| 3:optional string clusterId |
There was a problem hiding this comment.
Better use "body" instead of adding a new field?
| if (pipeConnector instanceof IoTDBDataNodeSyncConnector) { | ||
| attributeSortedStringToSinkClusterIdsMap.put( | ||
| attributeSortedString, | ||
| ((IoTDBDataNodeSyncConnector) pipeConnector).getEndPointsClusterIds()); |
There was a problem hiding this comment.
Better put this in "IoTDBConnector"....
| insertRowStatement.getValues(), | ||
| insertRowStatement.isNeedInferType()); | ||
| insertNode.setFailedMeasurementNumber(insertRowStatement.getFailedMeasurementNumber()); | ||
| insertNode.setOriginClusterId(insertRowStatement.getOriginClusterId()); |
There was a problem hiding this comment.
Do we really need this?
|
|
||
| protected abstract void confineHistoricalEventTransferTypes(final PipeSnapshotEvent event); | ||
|
|
||
| public Set<String> getSinkClusterIds() { |
| private long tabletConversionThresholdBytes = -1; | ||
| private boolean autoCreateDatabase = true; | ||
| private boolean isGeneratedByPipe = false; | ||
| private String originClusterId; |
|
|
||
| protected long ramBytesUsed = Long.MIN_VALUE; | ||
|
|
||
| protected String originClusterId; |
| protected String password = CONNECTOR_IOTDB_PASSWORD_DEFAULT_VALUE; | ||
|
|
||
| // Used to store the clusterId for location comparison | ||
| public static final Map<String, String> CLUSTER_ID_MAP = new HashMap<>(); |
There was a problem hiding this comment.
Why not use a HashSet?
| public static PipeEnrichedWritePlanNode deserialize(final ByteBuffer buffer) { | ||
| return new PipeEnrichedWritePlanNode((WritePlanNode) PlanNodeType.deserialize(buffer)); | ||
| return new PipeEnrichedWritePlanNode( | ||
| (WritePlanNode) PlanNodeType.deserialize(buffer), |
There was a problem hiding this comment.
Could there be an ignored children size here as well?
| @Override | ||
| public RegionExecutionResult visitPipeEnrichedWritePlanNode( | ||
| final PipeEnrichedWritePlanNode node, final WritePlanNodeExecutionContext context) { | ||
| node.setOriginClusterId(node.getOriginClusterId()); |
There was a problem hiding this comment.
Seemingly some extra operations are needed to pass the "originClusterId" to the state machine....
| return new PipeEnrichedInsertNode((InsertNode) PlanNodeType.deserialize(buffer)); | ||
| return new PipeEnrichedInsertNode( | ||
| (InsertNode) PlanNodeType.deserialize(buffer), | ||
| buffer.hasRemaining() ? ReadWriteIOUtils.readString(buffer) : null); |
There was a problem hiding this comment.
What about the ignored children size?
| public static PipeEnrichedNonWritePlanNode deserialize(ByteBuffer buffer) { | ||
| return new PipeEnrichedNonWritePlanNode(PlanNodeType.deserialize(buffer)); | ||
| return new PipeEnrichedNonWritePlanNode( | ||
| PlanNodeType.deserialize(buffer), |
There was a problem hiding this comment.
What about ignored children size?
| endTime); | ||
| } | ||
|
|
||
| public PipeTsFileInsertionEvent( |
| private final LoadTsFileDataCacheMemoryBlock block; | ||
| private final String originClusterId; | ||
|
|
||
| public LoadTsFileScheduler( |
|
Will a similar mechanism be applied to "WriteBackSink" (i.e., ensuring the write-back sink does not re-transfer data it has already written back)?
No description provided.