Empty file removed pythonbpf/tbaa_gen/__init__.py
17 changes: 15 additions & 2 deletions pythonbpf/vmlinux_parser/class_handler.py
@@ -71,6 +71,9 @@ def process_vmlinux_post_ast(
if len(field_elem) == 2:
field_name, field_type = field_elem
elif len(field_elem) == 3:
raise NotImplementedError(
"Bitfields are not supported in the current version"
)
field_name, field_type, bitfield_size = field_elem
field_table[field_name] = [field_type, bitfield_size]
elif hasattr(class_obj, "__annotations__"):
@@ -144,7 +147,12 @@ def process_vmlinux_post_ast(
process_vmlinux_post_ast(
containing_type, llvm_handler, handler, processing_stack
)
new_dep_node.set_field_ready(elem_name, True)
size_of_containing_type = (
handler[containing_type.__name__]
).__sizeof__()
new_dep_node.set_field_ready(
elem_name, True, size_of_containing_type
)
elif containing_type.__module__ == ctypes.__name__:
logger.debug(f"Processing ctype internal {containing_type}")
new_dep_node.set_field_ready(elem_name, True)
@@ -161,7 +169,12 @@ def process_vmlinux_post_ast(
process_vmlinux_post_ast(
elem_type, llvm_handler, handler, processing_stack
)
new_dep_node.set_field_ready(elem_name, True)
size_of_containing_type = (
handler[elem_type.__name__]
).__sizeof__()
new_dep_node.set_field_ready(
elem_name, True, size_of_containing_type
)
else:
raise ValueError(
f"{elem_name} with type {elem_type} from module {module_name} not supported in recursive resolver"
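For illustration, a minimal stand-alone sketch (not part of this diff) of the two field-descriptor shapes the handler distinguishes above. ctypes uses the same convention in _fields_: a 2-tuple is a plain field, a 3-tuple carries a bitfield width, and the 3-tuple form is what the new code rejects with NotImplementedError.

import ctypes

fields = [
    ("pid", ctypes.c_int),        # 2-tuple: (name, type)
    ("flags", ctypes.c_uint, 3),  # 3-tuple: (name, type, bitfield width) -- rejected above
]

for field_elem in fields:
    if len(field_elem) == 2:
        field_name, field_type = field_elem
        print(f"plain field {field_name}: {field_type.__name__}")
    elif len(field_elem) == 3:
        field_name, field_type, bitfield_size = field_elem
        print(f"bitfield {field_name}: {field_type.__name__} ({bitfield_size} bits)")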
20 changes: 20 additions & 0 deletions pythonbpf/vmlinux_parser/dependency_handler.py
@@ -147,3 +147,23 @@ def __len__(self) -> int:
int: The number of nodes
"""
return len(self._nodes)

def __getitem__(self, name: str) -> DependencyNode:
"""
Get a node by name using dictionary-style access.

Args:
name: The name of the node to retrieve

Returns:
DependencyNode: The node with the given name

Raises:
KeyError: If no node with the given name exists

Example:
node = handler["some-dep_node_name"]
"""
if name not in self._nodes:
raise KeyError(f"No node with name '{name}' found")
return self._nodes[name]
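A hedged usage sketch of the new dictionary-style access; the node name below is made up for illustration, and in practice nodes are populated while the vmlinux imports are processed.

from pythonbpf.vmlinux_parser.dependency_handler import DependencyHandler

handler = DependencyHandler()
try:
    node = handler["struct_xdp_md"]  # hypothetical node name
    print(node.name, len(node.fields))
except KeyError as err:
    print(f"dependency not registered: {err}")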
77 changes: 76 additions & 1 deletion pythonbpf/vmlinux_parser/dependency_node.py
@@ -1,5 +1,6 @@
from dataclasses import dataclass, field
from typing import Dict, Any, Optional
import ctypes


# TODO: Fix the type name convention.
@@ -13,6 +14,7 @@ class Field:
containing_type: Optional[Any]
type_size: Optional[int]
bitfield_size: Optional[int]
offset: int
value: Any = None
ready: bool = False

@@ -60,6 +62,10 @@ def set_bitfield_size(self, bitfield_size: Any, mark_ready: bool = False) -> None:
if mark_ready:
self.ready = True

def set_offset(self, offset: int) -> None:
"""Set the offset of this field"""
self.offset = offset


@dataclass
class DependencyNode:
@@ -109,6 +115,7 @@ class DependencyNode:
depends_on: Optional[list[str]] = None
fields: Dict[str, Field] = field(default_factory=dict)
_ready_cache: Optional[bool] = field(default=None, repr=False)
current_offset: int = 0

def add_field(
self,
@@ -120,6 +127,7 @@ def add_field(
ctype_complex_type: Optional[int] = None,
bitfield_size: Optional[int] = None,
ready: bool = False,
offset: int = 0,
) -> None:
"""Add a field to the node with an optional initial value and readiness state."""
if self.depends_on is None:
@@ -133,10 +141,14 @@ def add_field(
type_size=type_size,
ctype_complex_type=ctype_complex_type,
bitfield_size=bitfield_size,
offset=offset,
)
# Invalidate readiness cache
self._ready_cache = None

def __sizeof__(self):
"""Return the accumulated size in bytes of the fields marked ready so far."""
return self.current_offset

def get_field(self, name: str) -> Field:
"""Get a field by name."""
return self.fields[name]
@@ -203,15 +215,78 @@ def set_field_bitfield_size(
# Invalidate readiness cache
self._ready_cache = None

def set_field_ready(self, name: str, is_ready: bool = False) -> None:
def set_field_ready(
self,
name: str,
is_ready: bool = False,
size_of_containing_type: Optional[int] = None,
) -> None:
"""Mark a field as ready or not ready, record its byte offset, and advance the node's running size."""
if name not in self.fields:
raise KeyError(f"Field '{name}' does not exist in node '{self.name}'")

self.fields[name].set_ready(is_ready)
self.fields[name].set_offset(self.current_offset)
self.current_offset += self._calculate_size(name, size_of_containing_type)
# Invalidate readiness cache
self._ready_cache = None

def _calculate_size(
self, name: str, size_of_containing_type: Optional[int] = None
) -> int:
processing_field = self.fields[name]
# size_of_field will be in bytes
if processing_field.type.__module__ == ctypes.__name__:
size_of_field = ctypes.sizeof(processing_field.type)
return size_of_field
elif processing_field.type.__module__ == "vmlinux":
if processing_field.ctype_complex_type is not None:
if issubclass(processing_field.ctype_complex_type, ctypes.Array):
if processing_field.containing_type.__module__ == ctypes.__name__:
if (
processing_field.containing_type is not None
and processing_field.type_size is not None
):
size_of_field = (
ctypes.sizeof(processing_field.containing_type)
* processing_field.type_size
)
else:
raise RuntimeError(
f"{processing_field} has no containing_type or type_size"
)
return size_of_field
elif processing_field.containing_type.__module__ == "vmlinux":
if (
size_of_containing_type is not None
and processing_field.type_size is not None
):
size_of_field = (
size_of_containing_type * processing_field.type_size
)
else:
raise RuntimeError(
f"{processing_field} has no containing_type or type_size"
)
return size_of_field
elif issubclass(processing_field.ctype_complex_type, ctypes._Pointer):
return ctypes.sizeof(ctypes.c_void_p)
else:
raise NotImplementedError(
"This subclass of ctype not supported yet"
)
else:
# look up the size already computed for the referenced vmlinux struct
if size_of_containing_type is None:
raise RuntimeError(
f"Size of containing type {size_of_containing_type} is None"
)
return size_of_containing_type

else:
raise ModuleNotFoundError("Module is not supported for the operation")
raise RuntimeError("control should not reach here")

@property
def is_ready(self) -> bool:
"""Check if the node is ready (all fields are ready)."""
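To make the size bookkeeping above concrete: each set_field_ready call stamps the field with the node's current running offset and then advances that offset by _calculate_size, so __sizeof__ reports the bytes consumed by the fields processed so far. Below is a stand-alone sketch of the byte sizes _calculate_size derives for the ctypes shapes it handles (plain scalar, Array, _Pointer); vmlinux-typed fields instead reuse the size already accumulated for the referenced node, passed in as size_of_containing_type. Values assume a typical 64-bit target.

import ctypes

print(ctypes.sizeof(ctypes.c_uint32))      # plain ctypes field          -> 4 bytes
print(ctypes.sizeof(ctypes.c_char) * 16)   # Array: element size * count -> 16 bytes
print(ctypes.sizeof(ctypes.c_void_p))      # any pointer field           -> 8 bytes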
26 changes: 19 additions & 7 deletions pythonbpf/vmlinux_parser/import_detector.py
@@ -1,11 +1,11 @@
import ast
import logging
from typing import List, Tuple, Dict
from typing import List, Tuple, Any
import importlib
import inspect

from .dependency_handler import DependencyHandler
from .ir_generation import IRGenerator
from .ir_gen import IRGenerator
from .class_handler import process_vmlinux_class

logger = logging.getLogger(__name__)
@@ -82,7 +82,7 @@ def vmlinux_proc(tree: ast.AST, module):
# initialise dependency handler
handler = DependencyHandler()
# initialise assignment dictionary of name to type
assignments: Dict[str, type] = {}
assignments: dict[str, tuple[type, Any]] = {}

if not import_statements:
logger.info("No vmlinux imports found")
@@ -129,7 +129,19 @@ def vmlinux_proc(tree: ast.AST, module):
)

IRGenerator(module, handler)


def process_vmlinux_assign(node, module, assignments: Dict[str, type]):
raise NotImplementedError("Assignment handling has not been implemented yet")
return assignments


def process_vmlinux_assign(node, module, assignments: dict[str, tuple[type, Any]]):
# Check if this is a simple assignment with a constant value
if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
target_name = node.targets[0].id
if isinstance(node.value, ast.Constant):
assignments[target_name] = (type(node.value.value), node.value.value)
logger.info(
f"Added assignment: {target_name} = {node.value.value!r} of type {type(node.value.value)}"
)
else:
raise ValueError(f"Unsupported assignment type for {target_name}")
else:
raise ValueError("Not a simple assignment")
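A stand-alone sketch of the assignment shape process_vmlinux_assign accepts: a single ast.Name target bound to a literal constant, which is how vmlinux-level constants such as TASK_COMM_LEN appear.

import ast

tree = ast.parse("TASK_COMM_LEN = 16")
assign = tree.body[0]

assignments: dict[str, tuple[type, object]] = {}
if len(assign.targets) == 1 and isinstance(assign.targets[0], ast.Name):
    if isinstance(assign.value, ast.Constant):
        target_name = assign.targets[0].id
        assignments[target_name] = (type(assign.value.value), assign.value.value)

print(assignments)  # {'TASK_COMM_LEN': (<class 'int'>, 16)}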
3 changes: 3 additions & 0 deletions pythonbpf/vmlinux_parser/ir_gen/__init__.py
@@ -0,0 +1,3 @@
from .ir_generation import IRGenerator

__all__ = ["IRGenerator"]
36 changes: 36 additions & 0 deletions pythonbpf/vmlinux_parser/ir_gen/ir_generation.py
@@ -0,0 +1,36 @@
import logging
from pythonbpf.vmlinux_parser.dependency_handler import DependencyHandler

logger = logging.getLogger(__name__)


class IRGenerator:
def __init__(self, module, handler: DependencyHandler):
self.module = module
self.handler: DependencyHandler = handler
self.generated: list[str] = []
if not handler.is_ready:
raise ImportError(
"Semantic analysis of vmlinux imports failed. Cannot generate IR"
)
for struct in handler:
self.struct_processor(struct)
print()

def struct_processor(self, struct):
if struct.name not in self.generated:
print(f"IR generating for {struct.name}")
print(f"Struct is {struct}")
for dependency in struct.depends_on:
if dependency not in self.generated:
dep_node_from_dependency = self.handler[dependency]
self.struct_processor(dep_node_from_dependency)
self.generated.append(dependency)
# write the actual processor logic here, at which point all dependencies are assumed resolved
# circular dependencies are not handled yet; the recursion would loop forever on one
self.generated.append(struct.name)

def struct_name_generator(
self,
) -> None:
pass
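The traversal in struct_processor is effectively a post-order walk: every dependency is generated before its dependent, and names already generated are skipped. A stand-alone sketch of that ordering with made-up struct names (like the real code, it would recurse forever on a cycle):

deps = {
    "struct_xdp_md": ["struct_xdp_rxq_info"],
    "struct_xdp_rxq_info": [],
}
generated: list[str] = []

def emit(name: str) -> None:
    if name in generated:
        return
    for dep in deps[name]:  # resolve dependencies first
        emit(dep)
    generated.append(name)

for name in deps:
    emit(name)

print(generated)  # ['struct_xdp_rxq_info', 'struct_xdp_md']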
17 changes: 0 additions & 17 deletions pythonbpf/vmlinux_parser/ir_generation.py

This file was deleted.

8 changes: 6 additions & 2 deletions tests/failing_tests/xdp_pass.py
@@ -1,9 +1,13 @@
from pythonbpf import bpf, map, section, bpfglobal, compile_to_ir
from pythonbpf.maps import HashMap
from pythonbpf.helper import XDP_PASS
from vmlinux import struct_xdp_md
from vmlinux import TASK_COMM_LEN # noqa: F401
from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401
from vmlinux import struct_ring_buffer_per_cpu # noqa: F401

# from vmlinux import struct_request
from vmlinux import struct_xdp_md
# from vmlinux import struct_trace_event_raw_sys_enter # noqa: F401
# from vmlinux import struct_ring_buffer_per_cpu # noqa: F401

from ctypes import c_int64
