From a9d340049d6dce00d9d4851282dc189008a33b08 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman <105607645+karthikscale3@users.noreply.github.com> Date: Fri, 19 Jul 2024 14:03:01 -0700 Subject: [PATCH 1/7] DSPy - Bugfixes and update to dspy-ai (#246) * Bugfix DSPy instrumentation * Add example for parallel execution * Bump version --- .../math_problems_cot_parallel.py | 59 +++++++++++++++ .../instrumentation/dspy/instrumentation.py | 4 +- .../instrumentation/dspy/patch.py | 72 +++++++++---------- src/langtrace_python_sdk/version.py | 2 +- 4 files changed, 98 insertions(+), 39 deletions(-) create mode 100644 src/examples/dspy_example/math_problems_cot_parallel.py diff --git a/src/examples/dspy_example/math_problems_cot_parallel.py b/src/examples/dspy_example/math_problems_cot_parallel.py new file mode 100644 index 00000000..8c5fabf7 --- /dev/null +++ b/src/examples/dspy_example/math_problems_cot_parallel.py @@ -0,0 +1,59 @@ +import dspy +from dspy.datasets.gsm8k import GSM8K, gsm8k_metric +from dspy.teleprompt import BootstrapFewShot +from concurrent.futures import ThreadPoolExecutor +from opentelemetry.context import get_current, attach, detach + +# flake8: noqa +from langtrace_python_sdk import langtrace, with_langtrace_root_span + +langtrace.init() + +turbo = dspy.OpenAI(model="gpt-3.5-turbo", max_tokens=250) +dspy.settings.configure(lm=turbo) + +# Load math questions from the GSM8K dataset +gsm8k = GSM8K() +gsm8k_trainset, gsm8k_devset = gsm8k.train[:10], gsm8k.dev[:10] + +class CoT(dspy.Module): + def __init__(self): + super().__init__() + self.prog = dspy.ChainOfThought("question -> answer") + + def forward(self, question): + return self.prog(question=question) + +@with_langtrace_root_span(name="parallel_example") +def example(): + # Set up the optimizer: we want to "bootstrap" (i.e., self-generate) 4-shot examples of our CoT program. + config = dict(max_bootstrapped_demos=4, max_labeled_demos=4) + + # Optimize! Use the `gsm8k_metric` here. 
In general, the metric is going to tell the optimizer how well it's doing. + teleprompter = BootstrapFewShot(metric=gsm8k_metric, **config) + optimized_cot = teleprompter.compile(CoT(), trainset=gsm8k_trainset) + + questions = [ + "What is the cosine of 0?", + "What is the tangent of 0?", + ] + + current_context = get_current() + + def run_with_context(context, func, *args, **kwargs): + token = attach(context) + try: + return func(*args, **kwargs) + finally: + detach(token) + + with ThreadPoolExecutor(max_workers=2) as executor: + futures = [executor.submit(run_with_context, current_context, optimized_cot, question=q) for q in questions] + + for future in futures: + ans = future.result() + print(ans) + + +if __name__ == "__main__": + example() diff --git a/src/langtrace_python_sdk/instrumentation/dspy/instrumentation.py b/src/langtrace_python_sdk/instrumentation/dspy/instrumentation.py index 375989ef..a9cc587e 100644 --- a/src/langtrace_python_sdk/instrumentation/dspy/instrumentation.py +++ b/src/langtrace_python_sdk/instrumentation/dspy/instrumentation.py @@ -27,12 +27,12 @@ class DspyInstrumentation(BaseInstrumentor): The DspyInstrumentor class represents the DSPy instrumentation""" def instrumentation_dependencies(self) -> Collection[str]: - return ["dspy >= 0.1.5"] + return ["dspy-ai >= 2.0.0"] def _instrument(self, **kwargs): tracer_provider = kwargs.get("tracer_provider") tracer = get_tracer(__name__, "", tracer_provider) - version = v("dspy") + version = v("dspy-ai") _W( "dspy.teleprompt.bootstrap", "BootstrapFewShot.compile", diff --git a/src/langtrace_python_sdk/instrumentation/dspy/patch.py b/src/langtrace_python_sdk/instrumentation/dspy/patch.py index 3427eba9..181b276d 100644 --- a/src/langtrace_python_sdk/instrumentation/dspy/patch.py +++ b/src/langtrace_python_sdk/instrumentation/dspy/patch.py @@ -39,25 +39,25 @@ def traced_method(wrapped, instance, args, kwargs): ), } span_attributes["dspy.optimizer.module.prog"] = json.dumps(prog) - if "metric" in 
instance and instance.metric: - span_attributes["dspy.optimizer.metric"] = instance.metric.__name__ + if hasattr(instance, 'metric'): + span_attributes["dspy.optimizer.metric"] = getattr(instance, 'metric').__name__ if kwargs.get("trainset") and len(kwargs.get("trainset")) > 0: span_attributes["dspy.optimizer.trainset"] = str(kwargs.get("trainset")) config = {} - if "metric_threshold" in instance and instance.metric_threshold: - config["metric_threshold"] = instance.metric_threshold - if "teacher_settings" in instance and instance.teacher_settings: - config["teacher_settings"] = instance.teacher_settings - if "max_bootstrapped_demos" in instance and instance.max_bootstrapped_demos: - config["max_bootstrapped_demos"] = instance.max_bootstrapped_demos - if "max_labeled_demos" in instance and instance.max_labeled_demos: - config["max_labeled_demos"] = instance.max_labeled_demos - if "max_rounds" in instance and instance.max_rounds: - config["max_rounds"] = instance.max_rounds - if "max_errors" in instance and instance.max_errors: - config["max_errors"] = instance.max_errors - if "error_count" in instance and instance.error_count: - config["error_count"] = instance.error_count + if hasattr(instance, 'metric_threshold'): + config["metric_threshold"] = getattr(instance, 'metric_threshold') + if hasattr(instance, 'teacher_settings'): + config["teacher_settings"] = getattr(instance, 'teacher_settings') + if hasattr(instance, 'max_bootstrapped_demos'): + config["max_bootstrapped_demos"] = getattr(instance, 'max_bootstrapped_demos') + if hasattr(instance, 'max_labeled_demos'): + config["max_labeled_demos"] = getattr(instance, 'max_labeled_demos') + if hasattr(instance, 'max_rounds'): + config["max_rounds"] = getattr(instance, 'max_rounds') + if hasattr(instance, 'max_errors'): + config["max_errors"] = getattr(instance, 'max_errors') + if hasattr(instance, 'error_count'): + config["error_count"] = getattr(instance, 'error_count') if config and len(config) > 0: 
span_attributes["dspy.optimizer.config"] = json.dumps(config) @@ -147,30 +147,30 @@ def traced_method(wrapped, instance, args, kwargs): **(extra_attributes if extra_attributes is not None else {}), } - if "devset" in instance and instance.devset is not None: - span_attributes["dspy.evaluate.devset"] = str(instance.devset) - if "display" in instance and instance.display is not None: - span_attributes["dspy.evaluate.display"] = str(instance.display) - if "num_threads" in instance and instance.num_threads is not None: - span_attributes["dspy.evaluate.num_threads"] = str(instance.num_threads) - if "return_outputs" in instance and instance.return_outputs is not None: + if hasattr(instance, "devset"): + span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset")) + if hasattr(instance, "trainset"): + span_attributes["dspy.evaluate.display"] = str(getattr(instance, "trainset")) + if hasattr(instance, "num_threads"): + span_attributes["dspy.evaluate.num_threads"] = str(getattr(instance, "num_threads")) + if hasattr(instance, "return_outputs"): span_attributes["dspy.evaluate.return_outputs"] = str( - instance.return_outputs + getattr(instance, "return_outputs") ) - if "display_table" in instance and instance.display_table is not None: - span_attributes["dspy.evaluate.display_table"] = str(instance.display_table) - if "display_progress" in instance and instance.display_progress is not None: + if hasattr(instance, "display_table"): + span_attributes["dspy.evaluate.display_table"] = str(getattr(instance, "display_table")) + if hasattr(instance, "display_progress"): span_attributes["dspy.evaluate.display_progress"] = str( - instance.display_progress + getattr(instance, "display_progress") ) - if "metric" in instance and instance.metric is not None: - span_attributes["dspy.evaluate.metric"] = instance.metric.__name__ - if "error_count" in instance and instance.error_count is not None: - span_attributes["dspy.evaluate.error_count"] = str(instance.error_count) - if 
"error_lock" in instance and instance.error_lock is not None: - span_attributes["dspy.evaluate.error_lock"] = str(instance.error_lock) - if "max_errors" in instance and instance.max_errors is not None: - span_attributes["dspy.evaluate.max_errors"] = str(instance.max_errors) + if hasattr(instance, "metric"): + span_attributes["dspy.evaluate.metric"] = getattr(instance, "metric").__name__ + if hasattr(instance, "error_count"): + span_attributes["dspy.evaluate.error_count"] = str(getattr(instance, "error_count")) + if hasattr(instance, "error_lock"): + span_attributes["dspy.evaluate.error_lock"] = str(getattr(instance, "error_lock")) + if hasattr(instance, "max_errors"): + span_attributes["dspy.evaluate.max_errors"] = str(getattr(instance, "max_errors")) if args and len(args) > 0: span_attributes["dspy.evaluate.args"] = str(args) diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py index b19ee4b7..ba51cedf 100644 --- a/src/langtrace_python_sdk/version.py +++ b/src/langtrace_python_sdk/version.py @@ -1 +1 @@ -__version__ = "2.2.1" +__version__ = "2.2.2" From e6d854222e87b0be51d140fe67dd82fbc3d7c108 Mon Sep 17 00:00:00 2001 From: darshit-s3 <119623510+darshit-s3@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:23:54 -0700 Subject: [PATCH 2/7] chore: add back openai tool choice arg (#245) * chore: add back openai tool choice arg * style: fix formating --- .../chat_completion_tool_choice.py | 73 +++++++++++++++++++ .../instrumentation/anthropic/patch.py | 4 +- .../instrumentation/gemini/patch.py | 5 +- src/langtrace_python_sdk/utils/llm.py | 1 + 4 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 src/examples/openai_example/chat_completion_tool_choice.py diff --git a/src/examples/openai_example/chat_completion_tool_choice.py b/src/examples/openai_example/chat_completion_tool_choice.py new file mode 100644 index 00000000..e1aaef11 --- /dev/null +++ b/src/examples/openai_example/chat_completion_tool_choice.py @@ -0,0 
+1,73 @@ +# Example taken from https://platform.openai.com/docs/guides/function-calling +import json + +from dotenv import find_dotenv, load_dotenv +from openai import OpenAI + +from langtrace_python_sdk import langtrace + +client = OpenAI() + +_ = load_dotenv(find_dotenv()) + +langtrace.init( + write_spans_to_console=True, +) + + +# Example dummy function hard coded to return the same weather +# In production, this could be your backend API or an external API +def get_current_weather(location, unit="fahrenheit"): + """Get the current weather in a given location""" + if "tokyo" in location.lower(): + return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit}) + elif "san francisco" in location.lower(): + return json.dumps( + {"location": "San Francisco", "temperature": "72", "unit": unit} + ) + elif "paris" in location.lower(): + return json.dumps({"location": "Paris", "temperature": "22", "unit": unit}) + else: + return json.dumps({"location": location, "temperature": "unknown"}) + + +def run_conversation(): + # Step 1: send the conversation and available functions to the model + messages = [ + { + "role": "user", + "content": "What's the weather like in San Francisco, Tokyo, and Paris?", + } + ] + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } + ] + response = client.chat.completions.create( + model="gpt-4o", + messages=messages, + tools=tools, + tool_choice="required", # auto is default, but we'll be explicit + ) + response_message = response.choices[0].message + tool_calls = response_message.tool_calls + print(tool_calls) + + +print(run_conversation()) diff --git a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py index b06fae33..dbaade3e 100644 --- a/src/langtrace_python_sdk/instrumentation/anthropic/patch.py +++ b/src/langtrace_python_sdk/instrumentation/anthropic/patch.py @@ -48,7 +48,9 @@ def traced_method(wrapped, instance, args, kwargs): prompts = kwargs.get("messages", []) system = kwargs.get("system") if system: - prompts = [{"role": "system", "content": system}] + kwargs.get("messages", []) + prompts = [{"role": "system", "content": system}] + kwargs.get( + "messages", [] + ) span_attributes = { **get_langtrace_attributes(version, service_provider), diff --git a/src/langtrace_python_sdk/instrumentation/gemini/patch.py b/src/langtrace_python_sdk/instrumentation/gemini/patch.py index b4b9b799..ceedb379 100644 --- a/src/langtrace_python_sdk/instrumentation/gemini/patch.py +++ b/src/langtrace_python_sdk/instrumentation/gemini/patch.py @@ -110,7 +110,10 @@ def get_llm_model(instance): def serialize_prompts(args, kwargs, instance): prompts = [] - if hasattr(instance, "_system_instruction") and instance._system_instruction is not None: + if ( + hasattr(instance, "_system_instruction") + and instance._system_instruction is not None + ): system_prompt = { "role": "system", "content": instance._system_instruction.__dict__["_pb"].parts[0].text, diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py index ed29451a..965f2324 100644 --- 
a/src/langtrace_python_sdk/utils/llm.py +++ b/src/langtrace_python_sdk/utils/llm.py @@ -123,6 +123,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None): SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"), SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"), SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None, + SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"), SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"), SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"), SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"), From 8b41bf98fa7e4042edfaacd3c2104ba693cdef00 Mon Sep 17 00:00:00 2001 From: Karthik Kalyanaraman <105607645+karthikscale3@users.noreply.github.com> Date: Mon, 22 Jul 2024 22:59:58 -0700 Subject: [PATCH 3/7] Allow DSPy span naming (#249) * Update example * Allow span naming using langtrace.span.name * Bump version --- .../math_problems_cot_parallel.py | 20 +++++----------- .../instrumentation/dspy/patch.py | 24 ++++++++++++++++--- src/langtrace_python_sdk/version.py | 2 +- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/src/examples/dspy_example/math_problems_cot_parallel.py b/src/examples/dspy_example/math_problems_cot_parallel.py index 8c5fabf7..b70890b5 100644 --- a/src/examples/dspy_example/math_problems_cot_parallel.py +++ b/src/examples/dspy_example/math_problems_cot_parallel.py @@ -1,8 +1,8 @@ +import contextvars import dspy from dspy.datasets.gsm8k import GSM8K, gsm8k_metric from dspy.teleprompt import BootstrapFewShot from concurrent.futures import ThreadPoolExecutor -from opentelemetry.context import get_current, attach, detach # flake8: noqa from langtrace_python_sdk import langtrace, with_langtrace_root_span @@ -22,7 +22,8 @@ def __init__(self): self.prog = dspy.ChainOfThought("question -> answer") def forward(self, question): - return self.prog(question=question) + result = inject_additional_attributes(lambda: 
self.prog(question=question), {'langtrace.span.name': 'MathProblemsCotParallel'}) + return result @with_langtrace_root_span(name="parallel_example") def example(): @@ -34,21 +35,12 @@ def example(): optimized_cot = teleprompter.compile(CoT(), trainset=gsm8k_trainset) questions = [ - "What is the cosine of 0?", - "What is the tangent of 0?", + "What is the sine of 0?", + "What is the tangent of 100?", ] - current_context = get_current() - - def run_with_context(context, func, *args, **kwargs): - token = attach(context) - try: - return func(*args, **kwargs) - finally: - detach(token) - with ThreadPoolExecutor(max_workers=2) as executor: - futures = [executor.submit(run_with_context, current_context, optimized_cot, question=q) for q in questions] + futures = [executor.submit(contextvars.copy_context().run, optimized_cot, question=q) for q in questions] for future in futures: ans = future.result() diff --git a/src/langtrace_python_sdk/instrumentation/dspy/patch.py b/src/langtrace_python_sdk/instrumentation/dspy/patch.py index 181b276d..4b57fe16 100644 --- a/src/langtrace_python_sdk/instrumentation/dspy/patch.py +++ b/src/langtrace_python_sdk/instrumentation/dspy/patch.py @@ -61,8 +61,14 @@ def traced_method(wrapped, instance, args, kwargs): if config and len(config) > 0: span_attributes["dspy.optimizer.config"] = json.dumps(config) + # passed operation name + opname = operation_name + if extra_attributes is not None and "langtrace.span.name" in extra_attributes: + # append the operation name to the span name + opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}" + attributes = FrameworkSpanAttributes(**span_attributes) - with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span: + with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span: _set_input_attributes(span, kwargs, attributes) try: @@ -100,6 +106,12 @@ def traced_method(wrapped, instance, args, kwargs): **(extra_attributes if extra_attributes is not None 
else {}), } + # passed operation name + opname = operation_name + if extra_attributes is not None and "langtrace.span.name" in extra_attributes: + # append the operation name to the span name + opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}" + if instance.__class__.__name__: span_attributes["dspy.signature.name"] = instance.__class__.__name__ span_attributes["dspy.signature"] = str(instance) @@ -108,7 +120,7 @@ def traced_method(wrapped, instance, args, kwargs): span_attributes["dspy.signature.args"] = str(kwargs) attributes = FrameworkSpanAttributes(**span_attributes) - with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span: + with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span: _set_input_attributes(span, kwargs, attributes) try: @@ -147,6 +159,12 @@ def traced_method(wrapped, instance, args, kwargs): **(extra_attributes if extra_attributes is not None else {}), } + # passed operation name + opname = operation_name + if extra_attributes is not None and "langtrace.span.name" in extra_attributes: + # append the operation name to the span name + opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}" + if hasattr(instance, "devset"): span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset")) if hasattr(instance, "trainset"): @@ -175,7 +193,7 @@ def traced_method(wrapped, instance, args, kwargs): span_attributes["dspy.evaluate.args"] = str(args) attributes = FrameworkSpanAttributes(**span_attributes) - with tracer.start_as_current_span(operation_name, kind=SpanKind.CLIENT) as span: + with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span: _set_input_attributes(span, kwargs, attributes) try: diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py index ba51cedf..f394e699 100644 --- a/src/langtrace_python_sdk/version.py +++ b/src/langtrace_python_sdk/version.py @@ -1 +1 @@ -__version__ = "2.2.2" +__version__ = "2.2.3" From 
b667cd51c8af078625f0bf3040db523f996dfb70 Mon Sep 17 00:00:00 2001 From: Ali Waleed <134522290+alizenhom@users.noreply.github.com> Date: Tue, 23 Jul 2024 14:34:30 +0300 Subject: [PATCH 4/7] silent fail if module is not found for crewai --- .../instrumentation/crewai/instrumentation.py | 33 ++++++++++--------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/src/langtrace_python_sdk/instrumentation/crewai/instrumentation.py b/src/langtrace_python_sdk/instrumentation/crewai/instrumentation.py index e648b671..78cb1f87 100644 --- a/src/langtrace_python_sdk/instrumentation/crewai/instrumentation.py +++ b/src/langtrace_python_sdk/instrumentation/crewai/instrumentation.py @@ -33,21 +33,24 @@ def _instrument(self, **kwargs): tracer_provider = kwargs.get("tracer_provider") tracer = get_tracer(__name__, "", tracer_provider) version = v("crewai") - _W( - "crewai.crew", - "Crew.kickoff", - patch_crew("Crew.kickoff", version, tracer), - ) - _W( - "crewai.agent", - "Agent.execute_task", - patch_crew("Agent.execute_task", version, tracer), - ) - _W( - "crewai.task", - "Task.execute", - patch_crew("Task.execute", version, tracer), - ) + try: + _W( + "crewai.crew", + "Crew.kickoff", + patch_crew("Crew.kickoff", version, tracer), + ) + _W( + "crewai.agent", + "Agent.execute_task", + patch_crew("Agent.execute_task", version, tracer), + ) + _W( + "crewai.task", + "Task.execute", + patch_crew("Task.execute", version, tracer), + ) + except Exception as e: + pass def _uninstrument(self, **kwargs): pass From 95a491f927eb53be22cd316f63cf584f2a1a3190 Mon Sep 17 00:00:00 2001 From: Ali Waleed <134522290+alizenhom@users.noreply.github.com> Date: Tue, 23 Jul 2024 14:36:28 +0300 Subject: [PATCH 5/7] bump version --- src/langtrace_python_sdk/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py index f394e699..62fa04d7 100644 --- a/src/langtrace_python_sdk/version.py +++ 
b/src/langtrace_python_sdk/version.py @@ -1 +1 @@ -__version__ = "2.2.3" +__version__ = "2.2.4" From 7fe25da464f1646d8d745d78fa1c3c8aca12e464 Mon Sep 17 00:00:00 2001 From: Ali Waleed <134522290+alizenhom@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:39:59 +0300 Subject: [PATCH 6/7] add llm.system to span attributes --- src/langtrace_python_sdk/utils/llm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/langtrace_python_sdk/utils/llm.py b/src/langtrace_python_sdk/utils/llm.py index 965f2324..4aefc912 100644 --- a/src/langtrace_python_sdk/utils/llm.py +++ b/src/langtrace_python_sdk/utils/llm.py @@ -88,6 +88,7 @@ def get_langtrace_attributes(version, service_provider, vendor_type="llm"): SpanAttributes.LANGTRACE_SERVICE_VERSION: version, SpanAttributes.LANGTRACE_SERVICE_NAME: service_provider, SpanAttributes.LANGTRACE_SERVICE_TYPE: vendor_type, + SpanAttributes.LLM_SYSTEM: service_provider, } From dab7e16f71719e7ce88163721aefde9f8649339f Mon Sep 17 00:00:00 2001 From: Ali Waleed <134522290+alizenhom@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:41:09 +0300 Subject: [PATCH 7/7] bump version --- src/langtrace_python_sdk/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/langtrace_python_sdk/version.py b/src/langtrace_python_sdk/version.py index f1e49f68..a7ecb802 100644 --- a/src/langtrace_python_sdk/version.py +++ b/src/langtrace_python_sdk/version.py @@ -1 +1 @@ -__version__ = "2.2.4" +__version__ = "2.2.6"