Enable Pyright for Type Checking #56

Merged · 3 commits · May 14, 2024
4 changes: 4 additions & 0 deletions .github/workflows/python_lint.yml
@@ -23,3 +23,7 @@ jobs:
      run: |
        ruff format .
        ruff check .
+
+    - name: Run Pyright
+      run: |
+        pyright
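
For reference, the same check can be run locally before pushing. A minimal sketch of a helper script (hypothetical, not part of this PR) that mirrors the CI step, assuming the pinned dev requirements below are installed:

# run_pyright.py -- hypothetical local helper mirroring the CI step.
# Runs the same "pyright" command and propagates its exit code, so a
# nonzero status fails locally just as the workflow job would.
import subprocess
import sys

result = subprocess.run(["pyright"], check=False)
sys.exit(result.returncode)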
11 changes: 11 additions & 0 deletions pyproject.toml
@@ -52,3 +52,14 @@ select = ["I", "B", "E", "F", "SIM", "W", "C90"]

[tool.ruff.format]
indent-style = "space"
+
+[tool.pyright]
+typeCheckingMode = "basic"
+exclude = [
+    "**/__pycache__",
+    "**/build/",
+    "setup.py",
+    "src/third_party/utils/protolib.py"
+]
+reportMissingImports = false
+reportAttributeAccessIssue = false
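
For context, a hypothetical snippet (not from this repository) showing the kind of mismatch that typeCheckingMode = "basic" reports even though the code runs without error; the two report* settings above then silence import and attribute-access diagnostics that would otherwise fire on vendored code:

# Hypothetical example: annotations are not enforced at runtime, so this
# line executes silently, but pyright's basic mode flags the assignment
# because None is not assignable to a variable declared as int.
correlation: int = None  # pyright: error in basic mode
print(correlation)       # prints "None" at runtime; the bug goes unnoticed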
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -1,2 +1,3 @@
pyre-check==0.9.19
+pyright==1.1.359
ruff==0.3.5
2 changes: 1 addition & 1 deletion src/converter/pytorch_converter.py
@@ -81,7 +81,7 @@ def initialize_attributes(self) -> None:
        self.pytorch_time = None
        self.pytorch_start_ts = None
        self.pytorch_finish_ts = None
-        self.pytorch_nodes = None
+        self.pytorch_nodes = dict()
        self.pytorch_root_nids = []

        # Initialize node mapping dictionaries
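The change above is a common pattern for satisfying a type checker: initializing a container attribute to an empty dict instead of None keeps its inferred type stable. A minimal sketch with a hypothetical class (not the converter itself):

from typing import Any, Dict

class Converter:
    def __init__(self) -> None:
        # An empty dict instead of None means the attribute is always a
        # Dict, so later accesses need no "is not None" guards for pyright.
        self.pytorch_nodes: Dict[int, Any] = {}

    def node_count(self) -> int:
        return len(self.pytorch_nodes)  # safe: always a dict

print(Converter().node_count())  # prints 0
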
34 changes: 17 additions & 17 deletions src/trace_link/trace_link.py
@@ -29,23 +29,23 @@ class KinetoOperator:

    Attributes:
        op_dict (Dict[str, Any]): Dictionary containing the operator data.
-        category (Optional[str]): Category of the operator.
-        name (Optional[str]): Name of the operator.
+        category (str): Category of the operator.
+        name (str): Name of the operator.
        phase (Optional[str]): Phase of the operator.
        inclusive_dur (int): Inclusive duration of the operator in microseconds.
        exclusive_dur (int): Exclusive duration of the operator in microseconds.
        timestamp (int): Timestamp of the operator in microseconds.
-        external_id (Optional[str]): External ID associated with the operator.
-        ev_idx (Optional[str]): Event index associated with the operator.
-        tid (Optional[int]): Thread ID associated with the operator.
+        external_id (str): External ID associated with the operator.
+        ev_idx (str): Event index associated with the operator.
+        tid (int): Thread ID associated with the operator.
        pytorch_op (Optional[PyTorchOperator]): Associated PyTorch operator.
        parent_pytorch_op_id (Optional[int]): ID of the parent PyTorch operator.
        inter_thread_dep (Optional[int]): ID of the latest CPU node from other
            threads before the gap.
        stream (Optional[int]): Stream ID associated with the operator.
        rf_id (Optional[int]): Record function ID.
-        correlation (Optional[int]): Correlation ID used to link CUDA runtime
-            operations with their GPU counterparts.
+        correlation (int): Correlation ID used to link CUDA runtime operations
+            with their GPU counterparts.
    """

    def __init__(self, kineto_op: Dict[str, Any]) -> None:
@@ -57,25 +57,25 @@ def __init__(self, kineto_op: Dict[str, Any]) -> None:
            operator data.
        """
        self.op_dict = kineto_op
-        self.category = kineto_op.get("cat")
-        self.name = kineto_op.get("name")
+        self.category = kineto_op.get("cat", "")
+        self.name = kineto_op.get("name", "")
        self.phase = kineto_op.get("ph")
        self.inclusive_dur = kineto_op.get("dur", 0)
        self.exclusive_dur = kineto_op.get("dur", 0)
        self.timestamp = kineto_op.get("ts", 0)
-        self.external_id = None
-        self.ev_idx = None
-        self.tid = kineto_op.get("tid")
+        self.external_id = ""
+        self.ev_idx = ""
+        self.tid = kineto_op.get("tid", 0)
        self.pytorch_op: Optional[PyTorchOperator] = None
        self.parent_pytorch_op_id = None
        self.inter_thread_dep: Optional[int] = None
        self.stream: Optional[int] = None
        self.rf_id: Optional[int] = None
-        self.correlation: Optional[int] = None
+        self.correlation: int = 0

        if "args" in kineto_op:
            self.external_id = kineto_op["args"].get("External id")
-            self.ev_idx = kineto_op["args"].get("Ev Idx")
+            self.ev_idx = kineto_op["args"].get("Ev Idx", "")
            self.stream = kineto_op["args"].get("stream")
            if "Record function id" in kineto_op["args"]:
                self.rf_id = int(kineto_op["args"]["Record function id"])
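
The defaults added above rely on dict.get(key, default), which changes the inferred type from Optional to a concrete type. A small standalone illustration (hypothetical field names, not repository code):

from typing import Optional

raw = {"cat": "cpu_op"}

# Without a default, .get() is typed Optional[str], forcing a None check:
name: Optional[str] = raw.get("name")
if name is not None:
    print(name.upper())

# With a default, the result is a plain str and needs no guard:
category: str = raw.get("cat", "")
print(category.upper())  # prints "CPU_OP"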
@@ -227,7 +227,7 @@ class TraceLinker:
            latest operator timestamp.
        kineto_thread_info (Dict[int, Tuple[int, int]]): Information about threads,
            mapping thread IDs to a tuple of start and end times.
-        kineto_rf_id_to_kineto_op_map (Dict[str, KinetoOperator]): Mapping from
+        kineto_rf_id_to_kineto_op_map (Dict[int, KinetoOperator]): Mapping from
            rf_id to KinetoOperator instances.
        pytorch_op_id_to_kineto_ops_map (Dict[int, List[KinetoOperator]]):
            Map from PyTorch op IDs to Kineto GPU ops.
@@ -266,7 +266,7 @@ def __init__(self, pytorch_et_file: str, kineto_file: str, log_level: str = "INF
        self.kineto_process_start_time: int = 0
        self.kineto_process_end_time: int = 0
        self.kineto_thread_info: Dict[int, Tuple[int, int]] = {}
-        self.kineto_rf_id_to_kineto_op_map: Dict[str, KinetoOperator] = {}
+        self.kineto_rf_id_to_kineto_op_map: Dict[int, KinetoOperator] = {}
        self.pytorch_op_id_to_kineto_ops_map: Dict[int, List[KinetoOperator]] = {}
        self.pytorch_op_id_to_inclusive_dur_map: Dict[int, int] = {}
        self.pytorch_op_id_to_exclusive_dur_map: Dict[int, int] = {}
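
The Dict[str, ...] to Dict[int, ...] correction above matches how the map is actually populated, since rf_id is parsed with int(...) in KinetoOperator. A minimal sketch (hypothetical values, not repository code):

from typing import Dict

rf_id_map: Dict[int, str] = {}

args = {"Record function id": "42"}      # trace fields arrive as strings
rf_id = int(args["Record function id"])  # parsed to int before use as a key
rf_id_map[rf_id] = "some_op"

print(rf_id_map[42])  # lookups must therefore use int keys as well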
@@ -761,7 +761,7 @@ def group_gpu_ops_by_cpu_launchers(self) -> Dict[str, List[KinetoOperator]]:
                self.logger.warning(warning_msg)
                continue

-            if parent_cpu_op.ev_idx is None:
+            if parent_cpu_op.ev_idx == "":
                error_msg = (
                    f"Missing 'ev_idx' for CPU operator {parent_cpu_op.name}. "
                    f"Cannot link GPU op {gpu_op.name} to {parent_cpu_op.name}."
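Because ev_idx now defaults to "" rather than None, a missing value is detected with an empty-string sentinel, as in the check above. A minimal sketch (hypothetical operator dicts, not repository code):

def ev_idx_of(op: dict) -> str:
    # Mirrors the new default: a missing "Ev Idx" yields "" instead of None.
    return op.get("args", {}).get("Ev Idx", "")

for op in [{"args": {"Ev Idx": "7"}}, {"args": {}}]:
    idx = ev_idx_of(op)
    if idx == "":  # the sentinel check that replaces the old "is None" test
        print("Missing 'ev_idx'; cannot link this op to a GPU op.")
    else:
        print(f"ev_idx = {idx}")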