From d3f0e3b2ef2881d89718b38e178335a569d5b506 Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Tue, 14 Oct 2025 03:09:18 +0530 Subject: [PATCH 1/6] remove tbaa_gen and make IR generator module --- pythonbpf/tbaa_gen/__init__.py | 0 pythonbpf/vmlinux_parser/import_detector.py | 2 +- pythonbpf/vmlinux_parser/ir_gen/__init__.py | 3 +++ pythonbpf/vmlinux_parser/{ => ir_gen}/ir_generation.py | 2 +- 4 files changed, 5 insertions(+), 2 deletions(-) delete mode 100644 pythonbpf/tbaa_gen/__init__.py create mode 100644 pythonbpf/vmlinux_parser/ir_gen/__init__.py rename pythonbpf/vmlinux_parser/{ => ir_gen}/ir_generation.py (85%) diff --git a/pythonbpf/tbaa_gen/__init__.py b/pythonbpf/tbaa_gen/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pythonbpf/vmlinux_parser/import_detector.py b/pythonbpf/vmlinux_parser/import_detector.py index 2ce9cb54..f5789cea 100644 --- a/pythonbpf/vmlinux_parser/import_detector.py +++ b/pythonbpf/vmlinux_parser/import_detector.py @@ -5,7 +5,7 @@ import inspect from .dependency_handler import DependencyHandler -from .ir_generation import IRGenerator +from .ir_gen import IRGenerator from .class_handler import process_vmlinux_class logger = logging.getLogger(__name__) diff --git a/pythonbpf/vmlinux_parser/ir_gen/__init__.py b/pythonbpf/vmlinux_parser/ir_gen/__init__.py new file mode 100644 index 00000000..3a136511 --- /dev/null +++ b/pythonbpf/vmlinux_parser/ir_gen/__init__.py @@ -0,0 +1,3 @@ +from .ir_generation import IRGenerator + +__all__ = ["IRGenerator"] diff --git a/pythonbpf/vmlinux_parser/ir_generation.py b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py similarity index 85% rename from pythonbpf/vmlinux_parser/ir_generation.py rename to pythonbpf/vmlinux_parser/ir_gen/ir_generation.py index c66ba112..e4dae7dd 100644 --- a/pythonbpf/vmlinux_parser/ir_generation.py +++ b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py @@ -1,5 +1,5 @@ import logging -from .dependency_handler import DependencyHandler +from pythonbpf.vmlinux_parser.dependency_handler import DependencyHandler logger = logging.getLogger(__name__) From 11e8e721886d7c1379ec61a308117a55ec2b7719 Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Wed, 15 Oct 2025 02:00:23 +0530 Subject: [PATCH 2/6] add base for ir gen --- .../vmlinux_parser/dependency_handler.py | 20 +++++++++++++++++++ .../vmlinux_parser/ir_gen/ir_generation.py | 20 ++++++++++++++++++- tests/failing_tests/xdp_pass.py | 6 ++++-- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/pythonbpf/vmlinux_parser/dependency_handler.py b/pythonbpf/vmlinux_parser/dependency_handler.py index fb49b00c..b960ab3e 100644 --- a/pythonbpf/vmlinux_parser/dependency_handler.py +++ b/pythonbpf/vmlinux_parser/dependency_handler.py @@ -147,3 +147,23 @@ def __len__(self) -> int: int: The number of nodes """ return len(self._nodes) + + def __getitem__(self, name: str) -> DependencyNode: + """ + Get a node by name using dictionary-style access. 
+ + Args: + name: The name of the node to retrieve + + Returns: + DependencyNode: The node with the given name + + Raises: + KeyError: If no node with the given name exists + + Example: + node = handler["some-dep_node_name"] + """ + if name not in self._nodes: + raise KeyError(f"No node with name '{name}' found") + return self._nodes[name] diff --git a/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py index e4dae7dd..1a2be626 100644 --- a/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py +++ b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py @@ -8,10 +8,28 @@ class IRGenerator: def __init__(self, module, handler: DependencyHandler): self.module = module self.handler: DependencyHandler = handler + self.generated: list[str] = [] if not handler.is_ready: raise ImportError( "Semantic analysis of vmlinux imports failed. Cannot generate IR" ) for struct in handler: - print(struct) + self.struct_processor(struct) print() + + def struct_processor(self, struct): + if struct.name not in self.generated: + print(f"IR generating for {struct.name}") + print(f"Struct is {struct}") + for dependency in struct.depends_on: + if dependency not in self.generated: + dep_node_from_dependency = self.handler[dependency] + self.struct_processor(dep_node_from_dependency) + self.generated.append(dependency) + # write actual processor logic here after assuming all dependencies are resolved + # this part cannot yet resolve circular dependencies. Gets stuck on an infinite loop during that. + self.generated.append(struct.name) + + + def struct_name_generator(self, ): + pass diff --git a/tests/failing_tests/xdp_pass.py b/tests/failing_tests/xdp_pass.py index 6d6be86a..f44910d6 100644 --- a/tests/failing_tests/xdp_pass.py +++ b/tests/failing_tests/xdp_pass.py @@ -1,9 +1,11 @@ from pythonbpf import bpf, map, section, bpfglobal, compile_to_ir from pythonbpf.maps import HashMap from pythonbpf.helper import XDP_PASS +# from vmlinux import struct_request +from vmlinux import struct_trace_event_raw_sys_enter from vmlinux import struct_xdp_md -from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 -from vmlinux import struct_ring_buffer_per_cpu # noqa: F401 +# from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 +# from vmlinux import struct_ring_buffer_per_cpu # noqa: F401 from ctypes import c_int64 From 69b73003caa507d83a6b90e195ee806dbaf59d4b Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Wed, 15 Oct 2025 04:42:38 +0530 Subject: [PATCH 3/6] setup skeleton for offset calculation --- pythonbpf/vmlinux_parser/class_handler.py | 1 + pythonbpf/vmlinux_parser/dependency_node.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/pythonbpf/vmlinux_parser/class_handler.py b/pythonbpf/vmlinux_parser/class_handler.py index cf82e50d..ce085308 100644 --- a/pythonbpf/vmlinux_parser/class_handler.py +++ b/pythonbpf/vmlinux_parser/class_handler.py @@ -71,6 +71,7 @@ def process_vmlinux_post_ast( if len(field_elem) == 2: field_name, field_type = field_elem elif len(field_elem) == 3: + raise NotImplementedError("Bitfields are not supported in the current version") field_name, field_type, bitfield_size = field_elem field_table[field_name] = [field_type, bitfield_size] elif hasattr(class_obj, "__annotations__"): diff --git a/pythonbpf/vmlinux_parser/dependency_node.py b/pythonbpf/vmlinux_parser/dependency_node.py index 7f32323a..8a512cdd 100644 --- a/pythonbpf/vmlinux_parser/dependency_node.py +++ b/pythonbpf/vmlinux_parser/dependency_node.py @@ -13,6 
+13,7 @@ class Field: containing_type: Optional[Any] type_size: Optional[int] bitfield_size: Optional[int] + offset: int value: Any = None ready: bool = False @@ -60,6 +61,10 @@ def set_bitfield_size(self, bitfield_size: Any, mark_ready: bool = False) -> Non if mark_ready: self.ready = True + def set_offset(self, offset: int) -> None: + """Set the offset of this field""" + self.offset = offset + @dataclass class DependencyNode: @@ -109,6 +114,7 @@ class DependencyNode: depends_on: Optional[list[str]] = None fields: Dict[str, Field] = field(default_factory=dict) _ready_cache: Optional[bool] = field(default=None, repr=False) + current_offset: int = 0 def add_field( self, @@ -120,6 +126,7 @@ def add_field( ctype_complex_type: Optional[int] = None, bitfield_size: Optional[int] = None, ready: bool = False, + offset: int = 0, ) -> None: """Add a field to the node with an optional initial value and readiness state.""" if self.depends_on is None: @@ -133,6 +140,7 @@ def add_field( type_size=type_size, ctype_complex_type=ctype_complex_type, bitfield_size=bitfield_size, + offset=offset ) # Invalidate readiness cache self._ready_cache = None @@ -209,9 +217,14 @@ def set_field_ready(self, name: str, is_ready: bool = False) -> None: raise KeyError(f"Field '{name}' does not exist in node '{self.name}'") self.fields[name].set_ready(is_ready) + self.fields[name].set_offset(self.current_offset) + self.current_offset += self._calculate_size(name) + # Invalidate readiness cache self._ready_cache = None + def _calculate_size(self, name: str) -> int: + pass @property def is_ready(self) -> bool: """Check if the node is ready (all fields are ready).""" From a4cfc2b7aafd0f4c77a4da400b2c448c1d16834a Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Wed, 15 Oct 2025 17:49:20 +0530 Subject: [PATCH 4/6] add assignments table and offset handler --- pythonbpf/vmlinux_parser/class_handler.py | 10 ++-- pythonbpf/vmlinux_parser/dependency_node.py | 49 ++++++++++++++++--- pythonbpf/vmlinux_parser/import_detector.py | 8 ++- .../vmlinux_parser/ir_gen/ir_generation.py | 5 +- tests/failing_tests/xdp_pass.py | 3 +- 5 files changed, 62 insertions(+), 13 deletions(-) diff --git a/pythonbpf/vmlinux_parser/class_handler.py b/pythonbpf/vmlinux_parser/class_handler.py index ce085308..07029394 100644 --- a/pythonbpf/vmlinux_parser/class_handler.py +++ b/pythonbpf/vmlinux_parser/class_handler.py @@ -71,7 +71,9 @@ def process_vmlinux_post_ast( if len(field_elem) == 2: field_name, field_type = field_elem elif len(field_elem) == 3: - raise NotImplementedError("Bitfields are not supported in the current version") + raise NotImplementedError( + "Bitfields are not supported in the current version" + ) field_name, field_type, bitfield_size = field_elem field_table[field_name] = [field_type, bitfield_size] elif hasattr(class_obj, "__annotations__"): @@ -145,7 +147,8 @@ def process_vmlinux_post_ast( process_vmlinux_post_ast( containing_type, llvm_handler, handler, processing_stack ) - new_dep_node.set_field_ready(elem_name, True) + size_of_containing_type = (handler[containing_type.__name__]).__sizeof__() + new_dep_node.set_field_ready(elem_name, True, size_of_containing_type) elif containing_type.__module__ == ctypes.__name__: logger.debug(f"Processing ctype internal{containing_type}") new_dep_node.set_field_ready(elem_name, True) @@ -162,7 +165,8 @@ def process_vmlinux_post_ast( process_vmlinux_post_ast( elem_type, llvm_handler, handler, processing_stack ) - new_dep_node.set_field_ready(elem_name, True) + size_of_containing_type = 
(handler[elem_type.__name__]).__sizeof__() + new_dep_node.set_field_ready(elem_name, True, size_of_containing_type) else: raise ValueError( f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver" diff --git a/pythonbpf/vmlinux_parser/dependency_node.py b/pythonbpf/vmlinux_parser/dependency_node.py index 8a512cdd..a6d4013a 100644 --- a/pythonbpf/vmlinux_parser/dependency_node.py +++ b/pythonbpf/vmlinux_parser/dependency_node.py @@ -1,5 +1,6 @@ from dataclasses import dataclass, field from typing import Dict, Any, Optional +import ctypes # TODO: FIX THE FUCKING TYPE NAME CONVENTION. @@ -140,11 +141,14 @@ def add_field( type_size=type_size, ctype_complex_type=ctype_complex_type, bitfield_size=bitfield_size, - offset=offset + offset=offset, ) # Invalidate readiness cache self._ready_cache = None + def __sizeof__(self): + return self.current_offset + def get_field(self, name: str) -> Field: """Get a field by name.""" return self.fields[name] @@ -211,20 +215,53 @@ def set_field_bitfield_size( # Invalidate readiness cache self._ready_cache = None - def set_field_ready(self, name: str, is_ready: bool = False) -> None: + def set_field_ready(self, name: str, is_ready: bool = False, size_of_containing_type: Optional[int] = None) -> None: """Mark a field as ready or not ready.""" if name not in self.fields: raise KeyError(f"Field '{name}' does not exist in node '{self.name}'") self.fields[name].set_ready(is_ready) self.fields[name].set_offset(self.current_offset) - self.current_offset += self._calculate_size(name) - + self.current_offset += self._calculate_size(name, size_of_containing_type) # Invalidate readiness cache self._ready_cache = None - def _calculate_size(self, name: str) -> int: - pass + def _calculate_size(self, name: str, size_of_containing_type: Optional[int] = None) -> int: + processing_field = self.fields[name] + # size_of_field will be in bytes + if processing_field.type.__module__ == ctypes.__name__: + size_of_field = ctypes.sizeof(processing_field.type) + return size_of_field + elif processing_field.type.__module__ == "vmlinux": + size_of_field: int = 0 + if processing_field.ctype_complex_type is not None: + if issubclass(processing_field.ctype_complex_type, ctypes.Array): + if processing_field.containing_type.__module__ == ctypes.__name__: + size_of_field = ( + ctypes.sizeof(processing_field.containing_type) + * processing_field.type_size + ) + return size_of_field + elif processing_field.containing_type.__module__ == "vmlinux": + size_of_field = ( + size_of_containing_type + * processing_field.type_size + ) + return size_of_field + elif issubclass(processing_field.ctype_complex_type, ctypes._Pointer): + return ctypes.sizeof(ctypes.pointer()) + else: + raise NotImplementedError( + "This subclass of ctype not supported yet" + ) + else: + # search up pre-created stuff and get size + return size_of_containing_type + + else: + raise ModuleNotFoundError("Module is not supported for the operation") + raise RuntimeError("control should not reach here") + @property def is_ready(self) -> bool: """Check if the node is ready (all fields are ready).""" diff --git a/pythonbpf/vmlinux_parser/import_detector.py b/pythonbpf/vmlinux_parser/import_detector.py index f5789cea..e314a355 100644 --- a/pythonbpf/vmlinux_parser/import_detector.py +++ b/pythonbpf/vmlinux_parser/import_detector.py @@ -129,7 +129,13 @@ def vmlinux_proc(tree: ast.AST, module): ) IRGenerator(module, handler) + return assignments def process_vmlinux_assign(node, module, assignments: 
Dict[str, type]): - raise NotImplementedError("Assignment handling has not been implemented yet") + # Check if this is a simple assignment with a constant value + if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name): + target_name = node.targets[0].id + if isinstance(node.value, ast.Constant): + assignments[target_name] = node.value.value + logger.info(f"Added assignment: {target_name} = {node.value.value}") diff --git a/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py index 1a2be626..d500cf06 100644 --- a/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py +++ b/pythonbpf/vmlinux_parser/ir_gen/ir_generation.py @@ -30,6 +30,7 @@ def struct_processor(self, struct): # this part cannot yet resolve circular dependencies. Gets stuck on an infinite loop during that. self.generated.append(struct.name) - - def struct_name_generator(self, ): + def struct_name_generator( + self, + ) -> None: pass diff --git a/tests/failing_tests/xdp_pass.py b/tests/failing_tests/xdp_pass.py index f44910d6..da438c8b 100644 --- a/tests/failing_tests/xdp_pass.py +++ b/tests/failing_tests/xdp_pass.py @@ -1,8 +1,9 @@ from pythonbpf import bpf, map, section, bpfglobal, compile_to_ir from pythonbpf.maps import HashMap from pythonbpf.helper import XDP_PASS +from vmlinux import TASK_COMM_LEN # noqa: F401 +from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 # from vmlinux import struct_request -from vmlinux import struct_trace_event_raw_sys_enter from vmlinux import struct_xdp_md # from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 # from vmlinux import struct_ring_buffer_per_cpu # noqa: F401 From 8239097fbb308e1bead8530a662f6024556b74e0 Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Wed, 15 Oct 2025 17:49:38 +0530 Subject: [PATCH 5/6] format chore --- pythonbpf/vmlinux_parser/class_handler.py | 16 ++++++++++++---- pythonbpf/vmlinux_parser/dependency_node.py | 14 ++++++++++---- tests/failing_tests/xdp_pass.py | 1 + 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/pythonbpf/vmlinux_parser/class_handler.py b/pythonbpf/vmlinux_parser/class_handler.py index 07029394..50f2fd65 100644 --- a/pythonbpf/vmlinux_parser/class_handler.py +++ b/pythonbpf/vmlinux_parser/class_handler.py @@ -147,8 +147,12 @@ def process_vmlinux_post_ast( process_vmlinux_post_ast( containing_type, llvm_handler, handler, processing_stack ) - size_of_containing_type = (handler[containing_type.__name__]).__sizeof__() - new_dep_node.set_field_ready(elem_name, True, size_of_containing_type) + size_of_containing_type = ( + handler[containing_type.__name__] + ).__sizeof__() + new_dep_node.set_field_ready( + elem_name, True, size_of_containing_type + ) elif containing_type.__module__ == ctypes.__name__: logger.debug(f"Processing ctype internal{containing_type}") new_dep_node.set_field_ready(elem_name, True) @@ -165,8 +169,12 @@ def process_vmlinux_post_ast( process_vmlinux_post_ast( elem_type, llvm_handler, handler, processing_stack ) - size_of_containing_type = (handler[elem_type.__name__]).__sizeof__() - new_dep_node.set_field_ready(elem_name, True, size_of_containing_type) + size_of_containing_type = ( + handler[elem_type.__name__] + ).__sizeof__() + new_dep_node.set_field_ready( + elem_name, True, size_of_containing_type + ) else: raise ValueError( f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver" diff --git a/pythonbpf/vmlinux_parser/dependency_node.py b/pythonbpf/vmlinux_parser/dependency_node.py index 
a6d4013a..a0e1d456 100644 --- a/pythonbpf/vmlinux_parser/dependency_node.py +++ b/pythonbpf/vmlinux_parser/dependency_node.py @@ -215,7 +215,12 @@ def set_field_bitfield_size( # Invalidate readiness cache self._ready_cache = None - def set_field_ready(self, name: str, is_ready: bool = False, size_of_containing_type: Optional[int] = None) -> None: + def set_field_ready( + self, + name: str, + is_ready: bool = False, + size_of_containing_type: Optional[int] = None, + ) -> None: """Mark a field as ready or not ready.""" if name not in self.fields: raise KeyError(f"Field '{name}' does not exist in node '{self.name}'") @@ -226,7 +231,9 @@ def set_field_ready(self, name: str, is_ready: bool = False, size_of_containing_ # Invalidate readiness cache self._ready_cache = None - def _calculate_size(self, name: str, size_of_containing_type: Optional[int] = None) -> int: + def _calculate_size( + self, name: str, size_of_containing_type: Optional[int] = None + ) -> int: processing_field = self.fields[name] # size_of_field will be in bytes if processing_field.type.__module__ == ctypes.__name__: @@ -244,8 +251,7 @@ def _calculate_size(self, name: str, size_of_containing_type: Optional[int] = No return size_of_field elif processing_field.containing_type.__module__ == "vmlinux": size_of_field = ( - size_of_containing_type - * processing_field.type_size + size_of_containing_type * processing_field.type_size ) return size_of_field elif issubclass(processing_field.ctype_complex_type, ctypes._Pointer): diff --git a/tests/failing_tests/xdp_pass.py b/tests/failing_tests/xdp_pass.py index da438c8b..99006955 100644 --- a/tests/failing_tests/xdp_pass.py +++ b/tests/failing_tests/xdp_pass.py @@ -3,6 +3,7 @@ from pythonbpf.helper import XDP_PASS from vmlinux import TASK_COMM_LEN # noqa: F401 from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 + # from vmlinux import struct_request from vmlinux import struct_xdp_md # from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401 From c499fe7421bfdfd8df1ce170a5d611add22e2d03 Mon Sep 17 00:00:00 2001 From: varun-r-mallya Date: Wed, 15 Oct 2025 18:05:57 +0530 Subject: [PATCH 6/6] solve static typing issues --- pythonbpf/vmlinux_parser/dependency_node.py | 37 ++++++++++++++++----- pythonbpf/vmlinux_parser/import_detector.py | 16 ++++++--- 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/pythonbpf/vmlinux_parser/dependency_node.py b/pythonbpf/vmlinux_parser/dependency_node.py index a0e1d456..feebec35 100644 --- a/pythonbpf/vmlinux_parser/dependency_node.py +++ b/pythonbpf/vmlinux_parser/dependency_node.py @@ -240,28 +240,47 @@ def _calculate_size( size_of_field = ctypes.sizeof(processing_field.type) return size_of_field elif processing_field.type.__module__ == "vmlinux": - size_of_field: int = 0 if processing_field.ctype_complex_type is not None: if issubclass(processing_field.ctype_complex_type, ctypes.Array): if processing_field.containing_type.__module__ == ctypes.__name__: - size_of_field = ( - ctypes.sizeof(processing_field.containing_type) - * processing_field.type_size - ) + if ( + processing_field.containing_type is not None + and processing_field.type_size is not None + ): + size_of_field = ( + ctypes.sizeof(processing_field.containing_type) + * processing_field.type_size + ) + else: + raise RuntimeError( + f"{processing_field} has no containing_type or type_size" + ) return size_of_field elif processing_field.containing_type.__module__ == "vmlinux": - size_of_field = ( - size_of_containing_type * processing_field.type_size - ) + if 
( + size_of_containing_type is not None + and processing_field.type_size is not None + ): + size_of_field = ( + size_of_containing_type * processing_field.type_size + ) + else: + raise RuntimeError( + f"{processing_field} has no containing_type or type_size" + ) return size_of_field elif issubclass(processing_field.ctype_complex_type, ctypes._Pointer): - return ctypes.sizeof(ctypes.pointer()) + return ctypes.sizeof(ctypes.c_void_p) else: raise NotImplementedError( "This subclass of ctype not supported yet" ) else: # search up pre-created stuff and get size + if size_of_containing_type is None: + raise RuntimeError( + f"Size of containing type {size_of_containing_type} is None" + ) return size_of_containing_type else: diff --git a/pythonbpf/vmlinux_parser/import_detector.py b/pythonbpf/vmlinux_parser/import_detector.py index e314a355..972b1ff2 100644 --- a/pythonbpf/vmlinux_parser/import_detector.py +++ b/pythonbpf/vmlinux_parser/import_detector.py @@ -1,6 +1,6 @@ import ast import logging -from typing import List, Tuple, Dict +from typing import List, Tuple, Any import importlib import inspect @@ -82,7 +82,7 @@ def vmlinux_proc(tree: ast.AST, module): # initialise dependency handler handler = DependencyHandler() # initialise assignment dictionary of name to type - assignments: Dict[str, type] = {} + assignments: dict[str, tuple[type, Any]] = {} if not import_statements: logger.info("No vmlinux imports found") @@ -132,10 +132,16 @@ def vmlinux_proc(tree: ast.AST, module): return assignments -def process_vmlinux_assign(node, module, assignments: Dict[str, type]): +def process_vmlinux_assign(node, module, assignments: dict[str, tuple[type, Any]]): # Check if this is a simple assignment with a constant value if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name): target_name = node.targets[0].id if isinstance(node.value, ast.Constant): - assignments[target_name] = node.value.value - logger.info(f"Added assignment: {target_name} = {node.value.value}") + assignments[target_name] = (type(node.value.value), node.value.value) + logger.info( + f"Added assignment: {target_name} = {node.value.value!r} of type {type(node.value.value)}" + ) + else: + raise ValueError(f"Unsupported assignment type for {target_name}") + else: + raise ValueError("Not a simple assignment")
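
Note for readers following the offset bookkeeping introduced in patches 3 and 4 (`set_offset`, `current_offset`, `_calculate_size`): the short sketch below illustrates the same idea in isolation — walk a struct's fields in declaration order, record each field's byte offset, then advance by its `ctypes` size. It is illustrative only, not the pythonbpf implementation; the field names and the `layout` dict are made up, and, like the plain summation in the series' current `_calculate_size`/`set_field_ready`, it does not add alignment padding.

# Minimal, self-contained sketch of sequential offset calculation (illustrative only).
# Mirrors the pattern of assigning the current offset to a field, then advancing
# by that field's size in bytes.
import ctypes

# Hypothetical field list: (name, ctypes type) in declaration order.
fields = [
    ("pid", ctypes.c_int32),
    ("comm", ctypes.c_char * 16),   # fixed-size array: sizeof(element) * length
    ("ts", ctypes.c_uint64),
]

layout: dict[str, int] = {}
current_offset = 0
for name, ctype in fields:
    layout[name] = current_offset            # offset assigned before advancing
    current_offset += ctypes.sizeof(ctype)   # advance by the field's size in bytes

print(layout)           # {'pid': 0, 'comm': 4, 'ts': 20}
print(current_offset)   # 28 total bytes (no alignment padding applied)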