diff --git a/docs/platforms/python/profiling/index.mdx b/docs/platforms/python/profiling/index.mdx
index dc11984f426327..c19f1d2f7764b0 100644
--- a/docs/platforms/python/profiling/index.mdx
+++ b/docs/platforms/python/profiling/index.mdx
@@ -51,10 +51,6 @@ For Profiling to work, you have to first enable [Sentry’s tracing](/concepts/k
-### Upgrading from Older Python SDK Versions
-
-Profiling was experimental in SDK versions `1.17.0` and older. Learn how to upgrade here.
-
## Enable Continuous Profiling
diff --git a/docs/platforms/python/tracing/configure-sampling/index.mdx b/docs/platforms/python/tracing/configure-sampling/index.mdx
new file mode 100644
index 00000000000000..36e6570fceb916
--- /dev/null
+++ b/docs/platforms/python/tracing/configure-sampling/index.mdx
@@ -0,0 +1,348 @@
+---
+title: Configure Sampling
+description: "Learn how to configure sampling in your app."
+sidebar_order: 40
+---
+
+If you find that Sentry's tracing functionality is generating too much data (for example, if your spans quota is being exhausted too quickly), you can choose to sample your traces.
+
+Effective sampling is key to getting the most value from Sentry's performance monitoring while minimizing overhead. The Python SDK provides two ways to control the sampling rate. You can review the options and [examples](#trace-sampler-examples) below.
+
+## Sampling Configuration Options
+
+### 1. Uniform Sample Rate (`traces_sample_rate`)
+
+`traces_sample_rate` is a floating-point value between `0.0` and `1.0`, inclusive, which controls the probability with which each transaction will be sampled:
+
+
+
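+For example, a minimal configuration (with a placeholder DSN) might look like this:
+
+```python
+import sentry_sdk
+
+sentry_sdk.init(
+    dsn="your-dsn",
+    # Send 25% of transactions to Sentry
+    traces_sample_rate=0.25,
+)
+```
+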
+With `traces_sample_rate` set to `0.25`, each transaction in your application is randomly sampled with a probability of `0.25`, so you can expect that one in every four transactions will be sent to Sentry.
+
+### 2. Sampling Function (`traces_sampler`)
+
+For more granular control, you can provide a `traces_sampler` function. This approach allows you to:
+
+- Apply different sampling rates to different types of transactions
+- Filter out specific transactions entirely
+- Make sampling decisions based on transaction data
+- Control the inheritance of sampling decisions in distributed traces
+- Use custom attributes to modify sampling
+
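+A minimal sketch of such a sampler (respecting any parent decision, with a placeholder DSN) looks like this; more detailed examples follow below:
+
+```python
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+    # Respect the parent sampling decision if there is an incoming trace
+    parent_sampled = sampling_context.get("parent_sampled")
+    if parent_sampled is not None:
+        return float(parent_sampled)
+
+    # Otherwise, sample 10% of transactions
+    return 0.1
+
+sentry_sdk.init(
+    dsn="your-dsn",
+    traces_sampler=traces_sampler,
+)
+```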
+
+
+It is strongly recommended when using a custom `traces_sampler` that you respect the parent sampling decision. This ensures your traces will be complete.
+
+
+
+In distributed systems, respecting the parent's sampling decision whenever trace information is propagated between services ensures consistent sampling decisions across your entire distributed trace.
+
+
+
+
+
+#### Trace Sampler Examples
+
+1. Prioritizing Critical User Flows
+
+```python
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
+ ctx = sampling_context.get("transaction_context", {})
+ name = ctx.get("name")
+
+ # Sample all checkout transactions
+ if name and ('/checkout' in name or
+ ctx.get("op") == 'checkout'):
+ return 1.0
+
+ # Sample 50% of login transactions
+ if name and ('/login' in name or
+ ctx.get("op") == 'login'):
+ return 0.5
+
+ # Sample 10% of everything else
+ return 0.1
+
+sentry_sdk.init(
+ dsn="your-dsn",
+ traces_sampler=traces_sampler,
+)
+```
+
+2. Handling Different Environments and Error Rates
+
+```python
+import os
+
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
+ ctx = sampling_context.get("transaction_context", {})
+ environment = os.environ.get("ENVIRONMENT", "development")
+
+ # Sample all transactions in development
+ if environment == "development":
+ return 1.0
+
+ # Sample more transactions if there are recent errors
+ # Note: hasRecentErrors is a custom attribute that needs to be set
+ if ctx.get("data", {}).get("hasRecentErrors"):
+ return 0.8
+
+ # Sample based on environment
+ if environment == "production":
+ return 0.05 # 5% in production
+ elif environment == "staging":
+ return 0.2 # 20% in staging
+
+ # Default sampling rate
+ return 0.1
+
+# Initialize the SDK with the sampling function
+sentry_sdk.init(
+ dsn="your-dsn",
+ traces_sampler=traces_sampler,
+)
+
+# You can use the sampling function by setting custom attributes:
+# Option 1: When creating the transaction
+with sentry_sdk.start_transaction(name="GET /api/users", op="http.request") as transaction:
+ # Set custom attribute
+ transaction.set_data("hasRecentErrors", True)
+ # Your code here
+
+# Option 2: During the transaction's lifecycle
+with sentry_sdk.start_transaction(name="GET /api/users", op="http.request") as transaction:
+ # Your code here
+ transaction.set_data("hasRecentErrors", True) # Set custom attribute
+```
+
+3. Controlling Sampling Based on User and Transaction Properties
+
+```python
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
+ ctx = sampling_context.get("transaction_context", {})
+ data = ctx.get("data", {})
+
+ # Always sample for premium users
+ # Note: user.tier is a custom attribute that needs to be set
+ if data.get("user", {}).get("tier") == "premium":
+ return 1.0
+
+ # Sample more transactions for users experiencing errors
+ # Note: hasRecentErrors is a custom attribute
+ if data.get("hasRecentErrors"):
+ return 0.8
+
+ # Sample less for high-volume, low-value paths
+ # Note: name is an SDK-provided attribute
+ if (ctx.get("name") or "").startswith("/api/metrics"):
+ return 0.01
+
+ # Sample more for slow transactions
+ # Note: duration_ms is a custom attribute
+ if data.get("duration_ms", 0) > 1000: # Transactions over 1 second
+ return 0.5
+
+ # Default sampling rate
+ return 0.2
+
+# Initialize the SDK with the sampling function
+sentry_sdk.init(
+ dsn="your-dsn",
+ traces_sampler=traces_sampler,
+)
+
+# To set custom attributes for this example:
+with sentry_sdk.start_transaction(name="GET /api/users", op="http.request") as transaction:
+ # Set custom attributes
+ transaction.set_data("user", {"tier": "premium"}) # Custom user data
+ transaction.set_data("hasRecentErrors", True) # Custom error flag
+ transaction.set_data("duration_ms", 1500) # Custom timing data
+ # Your code here
+
+```
+
+4. Complex Business Logic Sampling
+
+```python
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
+ ctx = sampling_context.get("transaction_context", {})
+ data = ctx.get("data", {})
+
+ # Always sample critical business operations
+ # Note: op is an SDK-provided attribute
+ if ctx.get("op") in ["payment.process", "order.create", "user.verify"]:
+ return 1.0
+
+ # Sample based on user segment
+ # Note: user.segment is a custom attribute
+ user_segment = data.get("user", {}).get("segment")
+ if user_segment == "enterprise":
+ return 0.8
+ elif user_segment == "premium":
+ return 0.5
+
+ # Sample based on transaction value
+ # Note: transaction.value is a custom attribute
+ transaction_value = data.get("transaction", {}).get("value", 0)
+ if transaction_value > 1000: # High-value transactions
+ return 0.7
+
+ # Sample based on error rate in the service
+ # Note: service.error_rate is a custom attribute
+ error_rate = data.get("service", {}).get("error_rate", 0)
+ if error_rate > 0.05: # Error rate above 5%
+ return 0.9
+
+ # Default sampling rate
+ return 0.1
+
+# Initialize the SDK with the sampling function
+sentry_sdk.init(
+ dsn="your-dsn",
+ traces_sampler=traces_sampler,
+)
+
+# To set custom attributes for this example:
+with sentry_sdk.start_transaction(name="Process Payment", op="payment.process") as transaction:
+ # Set custom attributes
+ transaction.set_data("user", {"segment": "enterprise"}) # Custom user data
+ transaction.set_data("transaction", {"value": 1500}) # Custom transaction data
+ transaction.set_data("service", {"error_rate": 0.03}) # Custom service data
+ # Your code here
+
+```
+
+5. Performance-Based Sampling
+
+```python
+import sentry_sdk
+
+def traces_sampler(sampling_context):
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
+ ctx = sampling_context.get("transaction_context", {})
+ data = ctx.get("data", {})
+
+ # Sample all slow transactions
+ # Note: duration_ms is a custom attribute
+ if data.get("duration_ms", 0) > 2000: # Over 2 seconds
+ return 1.0
+
+ # Sample more transactions with high memory usage
+ # Note: memory_usage_mb is a custom attribute
+ if data.get("memory_usage_mb", 0) > 500: # Over 500MB
+ return 0.8
+
+ # Sample more transactions with high CPU usage
+ # Note: cpu_percent is a custom attribute
+ if data.get("cpu_percent", 0) > 80: # Over 80% CPU
+ return 0.8
+
+ # Sample more transactions with high database load
+ # Note: db_connections is a custom attribute
+ if data.get("db_connections", 0) > 100: # Over 100 connections
+ return 0.7
+
+ # Default sampling rate
+ return 0.1
+
+# Initialize the SDK with the sampling function
+sentry_sdk.init(
+ dsn="your-dsn",
+ traces_sampler=traces_sampler,
+)
+
+# To set custom attributes for this example:
+with sentry_sdk.start_transaction(name="Process Data", op="data.process") as transaction:
+ # Set custom attributes
+ transaction.set_data("duration_ms", 2500) # Custom timing data
+ transaction.set_data("memory_usage_mb", 600) # Custom memory data
+ transaction.set_data("cpu_percent", 85) # Custom CPU data
+ transaction.set_data("db_connections", 120) # Custom database data
+ # Your code here
+
+```
+
+
+## The Sampling Context Object
+
+When the `traces_sampler` function is called, the Sentry SDK passes a `sampling_context` object with information from the relevant span to help make sampling decisions:
+
+```python
+{
+ "transaction_context": {
+ "name": str, # transaction title at creation time (SDK-provided)
+ "op": str, # short description of transaction type (SDK-provided)
+ "data": Optional[Dict[str, Any]] # custom data you've added to the transaction
+ },
+ "parent_sampled": Optional[bool], # whether the parent transaction was sampled (SDK-provided)
+ "parent_sample_rate": Optional[float], # the sample rate used by the parent (SDK-provided)
+ "custom_sampling_context": Optional[Dict[str, Any]] # additional custom data for sampling
+}
+```
+
+### SDK-Provided vs. Custom Attributes
+
+The sampling context contains both SDK-provided attributes and custom attributes:
+
+**SDK-Provided Attributes:**
+- `transaction_context.name`: The name of the transaction
+- `transaction_context.op`: The operation type
+- `parent_sampled`: Whether the parent transaction was sampled
+- `parent_sample_rate`: The sample rate used by the parent
+
+**Custom Attributes:**
+- Any data you add to the `set_data` method on the transaction object. Use this for data that you want to include in the transaction data that gets sent to Sentry.
+- Any data you add to the `custom_sampling_context` parameter in `start_transaction`. Use this for data that you want to use for sampling decisions but don't want to include in the transaction data that gets sent to Sentry. Read more about sampling context [here](/platforms/python/configuration/sampling/#sampling-context).
+
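+The second option above can be used like this (a sketch; `vip_user` is just an illustrative key):
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_transaction(
+    name="GET /api/reports",
+    op="http.request",
+    # Available to traces_sampler for the sampling decision,
+    # but not sent to Sentry with the transaction data
+    custom_sampling_context={"vip_user": True},
+):
+    ...
+```
+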
+## Sampling Decision Precedence
+
+When multiple sampling mechanisms could apply, Sentry follows this order of precedence:
+
+1. If a sampling decision is passed to `start_transaction`, that decision is used
+2. If `traces_sampler` is defined, its decision is used. Although the `traces_sampler` can override the parent sampling decision, most users will want to ensure their `traces_sampler` respects the parent sampling decision
+3. If no `traces_sampler` is defined, but there is a parent sampling decision from an incoming distributed trace, we use the parent sampling decision
+4. If neither of the above, `traces_sample_rate` is used
+5. If none of the above are set, no transactions are sampled. This is equivalent to setting `traces_sample_rate=0.0`
+
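+For example, an explicit decision passed when starting a transaction (item 1 above) always wins over the other mechanisms. A minimal sketch:
+
+```python
+import sentry_sdk
+
+# This transaction is always sampled, regardless of traces_sample_rate
+# or any traces_sampler you have configured
+with sentry_sdk.start_transaction(name="Nightly Cleanup", op="task", sampled=True):
+    run_cleanup()  # placeholder for the work being traced
+```
+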
+## How Sampling Propagates in Distributed Traces
+
+Sentry uses a "head-based" sampling approach:
+
+- A sampling decision is made in the originating service (the "head")
+- This decision is propagated to all downstream services
+
+The two key headers are:
+- `sentry-trace`: Contains trace ID, span ID, and sampling decision
+- `baggage`: Contains additional trace metadata including sample rate
+
+The Sentry Python SDK automatically attaches these headers to outgoing HTTP requests when using auto-instrumentation with libraries like `requests`, `urllib3`, or `httpx`. For other communication channels, you can manually propagate trace information. Learn more about customizing tracing in [custom trace propagation](/platforms/python/tracing/distributed-tracing/custom-trace-propagation/).
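+
+For channels that aren't instrumented automatically, a minimal sketch of manual propagation using the SDK's helper functions might look like this (`incoming_headers` stands in for whatever header mapping your transport delivers, and `process_message` is a placeholder):
+
+```python
+import sentry_sdk
+
+# Producer side: attach the current trace headers to the outgoing message
+outgoing_headers = {
+    "sentry-trace": sentry_sdk.get_traceparent(),
+    "baggage": sentry_sdk.get_baggage(),
+}
+
+# Consumer side: continue the trace from the received headers
+transaction = sentry_sdk.continue_trace(
+    incoming_headers, op="task", name="Consume Message"
+)
+with sentry_sdk.start_transaction(transaction):
+    process_message()
+```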
diff --git a/docs/platforms/python/tracing/trace-propagation/custom-instrumentation/index.mdx b/docs/platforms/python/tracing/distributed-tracing/custom-trace-propagation/index.mdx
similarity index 63%
rename from docs/platforms/python/tracing/trace-propagation/custom-instrumentation/index.mdx
rename to docs/platforms/python/tracing/distributed-tracing/custom-trace-propagation/index.mdx
index c95628d0bcfd36..738df1dc027f47 100644
--- a/docs/platforms/python/tracing/trace-propagation/custom-instrumentation/index.mdx
+++ b/docs/platforms/python/tracing/distributed-tracing/custom-trace-propagation/index.mdx
@@ -1,6 +1,6 @@
---
-title: Custom Instrumentation
-sidebar_order: 40
+title: Custom Trace Propagation
+sidebar_order: 10
---
diff --git a/docs/platforms/python/tracing/trace-propagation/dealing-with-cors-issues/index.mdx b/docs/platforms/python/tracing/distributed-tracing/dealing-with-cors-issues/index.mdx
similarity index 100%
rename from docs/platforms/python/tracing/trace-propagation/dealing-with-cors-issues/index.mdx
rename to docs/platforms/python/tracing/distributed-tracing/dealing-with-cors-issues/index.mdx
diff --git a/docs/platforms/python/tracing/trace-propagation/index.mdx b/docs/platforms/python/tracing/distributed-tracing/index.mdx
similarity index 90%
rename from docs/platforms/python/tracing/trace-propagation/index.mdx
rename to docs/platforms/python/tracing/distributed-tracing/index.mdx
index fb41f4dd98c48f..13560be7a95b65 100644
--- a/docs/platforms/python/tracing/trace-propagation/index.mdx
+++ b/docs/platforms/python/tracing/distributed-tracing/index.mdx
@@ -1,10 +1,10 @@
---
-title: Trace Propagation
+title: Set Up Distributed Tracing
description: "Learn how to connect events across applications/services."
-sidebar_order: 3000
+sidebar_order: 30
---
-If the overall application landscape that you want to observe with Sentry consists of more than just a single service or application, distributed tracing can add a lot of value.
+
## What is Distributed Tracing?
diff --git a/docs/platforms/python/tracing/trace-propagation/limiting-trace-propagation/index.mdx b/docs/platforms/python/tracing/distributed-tracing/limiting-trace-propagation/index.mdx
similarity index 100%
rename from docs/platforms/python/tracing/trace-propagation/limiting-trace-propagation/index.mdx
rename to docs/platforms/python/tracing/distributed-tracing/limiting-trace-propagation/index.mdx
diff --git a/docs/platforms/python/tracing/instrumentation/index.mdx b/docs/platforms/python/tracing/instrumentation/index.mdx
index eaac9e016be4cb..be7475828ecfc3 100644
--- a/docs/platforms/python/tracing/instrumentation/index.mdx
+++ b/docs/platforms/python/tracing/instrumentation/index.mdx
@@ -1,7 +1,21 @@
---
title: Instrumentation
-description: "Learn how to instrument tracing in your app."
-sidebar_order: 20
+description: "Learn what Sentry instruments automatically, and how to configure spans to capture tracing data on any action in your app."
+sidebar_order: 50
---
-
+
+
+To capture transactions and spans customized to your organization's needs, you must first set up tracing.
+
+
+
+There are two ways that instrumentation is applied to your application:
+
+## Automatic Instrumentation
+
+Many integrations for popular frameworks automatically capture transactions that can be sent to Sentry. Read more about automatic instrumentation [here](/platforms/python/tracing/instrumentation/automatic-instrumentation/).
+
+## Custom Instrumentation
+To add custom performance data to your application, you need to add custom instrumentation in the form of [spans](/concepts/key-terms/tracing/distributed-tracing/#traces-transactions-and-spans). Spans are a way to measure the time it takes for a specific action to occur. For example, you can create a span to measure the time it takes for a function to execute. Learn more about span lifecycles [here](/platforms/python/tracing/span-lifecycle/).
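+
+As a quick illustration (a minimal sketch; `calculate_report` is a placeholder for your own code):
+
+```python
+import sentry_sdk
+
+# Measure how long a specific action takes
+with sentry_sdk.start_span(op="function", name="calculate_report"):
+    calculate_report()
+```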
+
diff --git a/docs/platforms/python/tracing/span-lifecycle/index.mdx b/docs/platforms/python/tracing/span-lifecycle/index.mdx
new file mode 100644
index 00000000000000..33018eb5e61841
--- /dev/null
+++ b/docs/platforms/python/tracing/span-lifecycle/index.mdx
@@ -0,0 +1,241 @@
+---
+title: Span Lifecycle
+description: "Learn how to create and manage spans in Sentry to monitor performance and debug applications."
+sidebar_order: 10
+---
+
+
+
+To capture transactions and spans customized to your organization's needs, you must first set up tracing.
+
+
+
+To add custom performance data to your application, you need to add custom instrumentation in the form of [spans](/concepts/key-terms/tracing/distributed-tracing/#traces-transactions-and-spans). Spans are a way to measure the time it takes for a specific action to occur. For example, you can create a span to measure the time it takes for a function to execute.
+
+
+
+There are two main approaches to creating spans in Python:
+
+- [Using the context manager](#using-the-context-manager): Creates a span with an automatic lifecycle (recommended).
+- [Manual span creation](#creating-spans-manually): Gives you more control over the span's lifecycle.
+
+## Span Lifecycle
+
+In Python, spans are typically created using a context manager, which automatically manages the span's lifecycle. When you create a span using a context manager, the span automatically starts when entering the context and ends when exiting it. This is the recommended approach for most scenarios.
+
+```python
+import sentry_sdk
+
+# Start a span for a task
+with sentry_sdk.start_span(op="task", name="Create User"):
+ # Your code here
+ # The span will automatically end when exiting this block
+ user = create_user(email="user@example.com")
+ send_welcome_email(user)
+ # The span automatically ends here when the 'with' block exits
+```
+
+## Creating Spans Manually
+
+If you need more control over a span's lifecycle, you can call the context manager's `__enter__` and `__exit__` methods yourself instead of using a `with` block.
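+
+A minimal sketch of this pattern (`do_work` is just a placeholder for the code being measured):
+
+```python
+import sentry_sdk
+
+# Start the span explicitly instead of using a `with` block
+span = sentry_sdk.start_span(op="task", name="Manual Span")
+span.__enter__()
+
+try:
+    do_work()
+finally:
+    # End the span explicitly; pass exception info here if you have it
+    span.__exit__(None, None, None)
+```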
+
+## Span Context and Nesting
+
+When you create a span, it becomes the child of the current active span. This allows you to build a hierarchy of spans that represent the execution path of your application:
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_span(op="process", name="Process Data"):
+ # This code is tracked in the "Process Data" span
+
+ with sentry_sdk.start_span(op="task", name="Validate Input"):
+ # This is now a child span of "Process Data"
+ validate_data()
+
+ with sentry_sdk.start_span(op="task", name="Transform Data"):
+ # Another child span
+ transform_data()
+```
+
+## Span Starting Options
+
+The following options can be used when creating spans:
+
+| Option | Type | Description |
+| ------------- | --------------- | ----------------------------------------------- |
+| `op` | `string` | The operation of the span. |
+| `name` | `string` | The name of the span. |
+| `start_timestamp` | `datetime/float`| The start time of the span. |
+
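+For example, `start_timestamp` lets you backdate a span (a sketch; it accepts a `datetime` or an epoch float):
+
+```python
+import time
+
+import sentry_sdk
+
+# Start the span half a second in the past, e.g. to account for work
+# that happened before instrumentation kicked in
+with sentry_sdk.start_span(
+    op="task",
+    name="Warm Cache",
+    start_timestamp=time.time() - 0.5,
+):
+    ...
+```
+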
+## Using the Context Manager
+
+For most scenarios, we recommend using the context manager approach with `sentry_sdk.start_span()`. This creates a new span that automatically starts when entering the context and ends when exiting it.
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_span(op="db", name="Query Users") as span:
+ # Perform a database query
+ users = db.query("SELECT * FROM users")
+
+ # You can set data on the span
+ span.set_data("user_count", len(users))
+```
+
+The context manager also correctly handles exceptions, marking the span as failed if an exception occurs:
+
+```python
+import sentry_sdk
+
+try:
+ with sentry_sdk.start_span(op="http", name="Call External API"):
+ # If this raises an exception, the span will be marked as failed
+ response = requests.get("https://api.example.com/data")
+ response.raise_for_status()
+except Exception:
+ # The span is already marked as failed and has ended
+ pass
+```
+
+## Getting the Current Span
+
+You can access the currently active span using `sentry_sdk.get_current_span()`:
+
+```python
+import sentry_sdk
+
+# Get the current active span
+current_span = sentry_sdk.get_current_span()
+if current_span:
+ current_span.set_data("key", "value")
+```
+
+## Working with Transactions
+
+[Transactions](/product/insights/overview/transaction-summary/#what-is-a-transaction) are a special type of span that represent a complete operation in your application, such as a web request. You can create transactions explicitly:
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_transaction(name="Background Task", op="task") as transaction:
+ # Your code here
+
+ # You can add child spans to the transaction
+ with sentry_sdk.start_span(op="subtask", name="Data Processing"):
+ # Process data
+ pass
+```
+
+## Improving Span Data
+
+### Adding Span Attributes
+
+Span attributes let you capture additional context with your tracing data as key-value pairs of various Python types. Once you drill into a span in Sentry's trace views, this information is displayed alongside the span.
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_span(op="db", name="Query Users") as span:
+ # Execute the query
+ users = db.query("SELECT * FROM users WHERE active = true")
+
+ # You can add more data during execution
+ span.set_data("result_count", len(users))
+```
+
+You can also add attributes to an existing span:
+
+```python
+import sentry_sdk
+
+# Get the current span
+span = sentry_sdk.get_current_span()
+if span:
+ # Set individual data points
+ span.set_data("user_id", user.id)
+ span.set_data("request_size", len(request.body))
+```
+
+### Adding Attributes to All Spans
+
+To add attributes to all spans, use the `before_send_transaction` callback:
+
+```python
+import sentry_sdk
+
+def before_send_transaction(event, hint):
+ # Add attributes to the root span (transaction)
+ if "trace" in event.get("contexts", {}):
+ if "data" not in event["contexts"]["trace"]:
+ event["contexts"]["trace"]["data"] = {}
+
+ event["contexts"]["trace"]["data"].update({
+ "app_version": "1.2.3",
+ "environment_region": "us-west-2"
+ })
+
+ # Add attributes to all child spans
+ for span in event.get("spans", []):
+ if "data" not in span:
+ span["data"] = {}
+
+ span["data"].update({
+ "component_version": "2.0.0",
+ "deployment_stage": "production"
+ })
+
+ return event
+
+sentry_sdk.init(
+ # Your other Sentry configuration options here
+ before_send_transaction=before_send_transaction
+)
+```
+
+### Adding Span Operations ("op")
+
+Spans can have an operation associated with them, which helps Sentry understand the context of the span. For example, database-related spans have the `db` operation, while HTTP requests use `http.client`.
+
+Sentry maintains a [list of well-known span operations](https://develop.sentry.dev/sdk/performance/span-operations/#list-of-operations) that you should use when applicable:
+
+```python
+import sentry_sdk
+
+# HTTP client operation
+with sentry_sdk.start_span(op="http.client", name="Fetch User Data"):
+ response = requests.get("https://api.example.com/users")
+
+# Database operation
+with sentry_sdk.start_span(op="db", name="Save User"):
+ db.execute(
+ "INSERT INTO users (name, email) VALUES (%s, %s)",
+ (user.name, user.email),
+ )
+
+# File I/O operation
+with sentry_sdk.start_span(op="file.read", name="Read Config"):
+ with open("config.json", "r") as f:
+ config = json.load(f)
+```
+
+### Updating the Span Status
+
+You can update the status of a span to indicate whether it succeeded or failed:
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_span(op="task", name="Process Payment") as span:
+ try:
+ result = process_payment(payment_id)
+ if result.success:
+ # Mark the span as successful
+ span.set_status("ok")
+ else:
+ # Mark the span as failed
+ span.set_status("error")
+ span.set_data("error_reason", result.error)
+ except Exception:
+ # Span will automatically be marked as failed when an exception occurs
+ raise
+```
+
diff --git a/docs/platforms/python/tracing/span-metrics/examples.mdx b/docs/platforms/python/tracing/span-metrics/examples.mdx
new file mode 100644
index 00000000000000..57b7d368a5ce67
--- /dev/null
+++ b/docs/platforms/python/tracing/span-metrics/examples.mdx
@@ -0,0 +1,484 @@
+---
+title: Example Instrumentation
+description: "Examples of using span metrics to debug performance issues and monitor application behavior across Python applications."
+sidebar_order: 10
+---
+
+
+
+These examples assume you have already set up tracing in your application.
+
+
+
+This guide provides practical examples of using span attributes and metrics to solve common monitoring and debugging challenges in Python applications. Each example demonstrates how to instrument different components, showing how they work together within a distributed trace to provide end-to-end visibility.
+
+## File Upload and Processing Pipeline
+
+**Challenge:** Understanding bottlenecks and failures in multi-step file processing operations across request handling and processing services.
+
+**Solution:** Track the entire file processing pipeline with detailed metrics at each stage, from initial file handling through processing and storage.
+
+**Client Application Instrumentation:**
+```python
+# File upload request handling
+# NOTE: this sketch assumes requests_toolbelt is installed for upload progress tracking
+import time
+from pathlib import Path
+
+import requests
+import sentry_sdk
+from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
+
+def upload_profile_image(file_path: Path):
+    with sentry_sdk.start_span(op="upload", name="Upload File") as span:
+        try:
+            total_size = file_path.stat().st_size
+            start = time.time()
+
+            def track_progress(monitor):
+                # Called by the monitor as bytes are read from the encoder
+                progress_percent = (monitor.bytes_read / total_size) * 100
+                span.set_data("upload.percent_complete", progress_percent)
+                span.set_data("upload.bytes_transferred", monitor.bytes_read)
+
+            with open(file_path, "rb") as f:
+                encoder = MultipartEncoder(
+                    fields={"file": (file_path.name, f, "image/jpeg")}
+                )
+                monitor = MultipartEncoderMonitor(encoder, track_progress)
+                response = requests.post(
+                    "https://api.example.com/upload",
+                    data=monitor,
+                    headers={
+                        "Content-Type": monitor.content_type,
+                        "sentry-trace": sentry_sdk.get_traceparent(),  # Propagate trace context
+                        "baggage": sentry_sdk.get_baggage(),
+                    },
+                )
+
+            # Record timing and final data after completion
+            span.set_data("upload.total_time_ms", (time.time() - start) * 1000)
+            span.set_data("upload.success", response.ok)
+            if response.ok:
+                result = response.json()
+                span.set_data("upload.server_file_id", result["file_id"])
+
+            return response
+
+        except Exception as error:
+            # Record failure information
+            span.set_data("upload.success", False)
+            span.set_data("upload.error_type", error.__class__.__name__)
+            span.set_data("upload.error_message", str(error))
+            span.set_status(sentry_sdk.SpanStatus.ERROR)
+            raise
+
+# Example call (the file path is just an illustration)
+upload_profile_image(Path("/path/to/uploads/user-profile.jpg"))
+```
+
+**Server Application Instrumentation:**
+```python
+# File processing service
+import sentry_sdk
+import time
+from pathlib import Path
+import boto3
+
+with sentry_sdk.start_span(
+ op="file.process.service",
+ name="File Processing Service"
+) as span:
+ # File processing implementation
+ file_path = Path("/tmp/uploads/user-profile.jpg")
+
+ # Process the file
+ try:
+ # Track individual processing steps
+ with sentry_sdk.start_span(op="scan", name="Virus Scan") as scan_span:
+ # Virus scan implementation
+ scan_span.set_data("scan.engine", "clamav")
+ scan_span.set_data("scan.result", "clean")
+
+ # Upload to S3
+ s3_client = boto3.client('s3', region_name='us-west-2')
+ upload_start = time.time()
+ s3_client.upload_file(
+ str(file_path),
+ 'my-bucket',
+ 'uploads/user-profile.jpg'
+ )
+
+ span.set_data("storage.actual_upload_time_ms",
+ (time.time() - upload_start) * 1000)
+
+ except Exception as e:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("error.message", str(e))
+ raise
+```
+
+**How the Trace Works Together:**
+The client application span initiates the trace and handles the file upload. It propagates the trace context to the server through the request headers. The server span continues the trace, processing the file and storing it. This creates a complete picture of the file's journey, allowing you to:
+
+- Identify bottlenecks at any stage (request preparation, network transfer, processing, storage)
+- Track end-to-end processing times and success rates
+- Monitor resource usage across the stack
+- Correlate request handling issues with processing service errors
+
+## LLM Integration Monitoring
+
+**Challenge:** Managing cost (token usage) and performance of LLM integrations in Python applications.
+
+**Solution:** Tracking of the entire LLM interaction flow, from initial request through response processing.
+
+**Client Application Instrumentation:**
+```python
+# LLM request handling in a Flask application
+import sentry_sdk
+import time
+import openai
+from flask import Flask, jsonify, request
+
+app = Flask(__name__)
+
+@app.route("/ask", methods=["POST"])
+def handle_llm_request():
+ with sentry_sdk.start_span(op="llm", name="Generate Text") as span:
+ start_time = time.time() * 1000 # Convert to milliseconds
+
+ # Begin streaming response from LLM API
+ user_input = request.json["question"]
+
+ response_chunks = []
+ first_token_received = False
+ tokens_received = 0
+
+ # Using OpenAI's streaming API
+ try:
+ for chunk in openai.ChatCompletion.create(
+ model="gpt-4",
+ messages=[{"role": "user", "content": user_input}],
+ stream=True
+ ):
+ tokens_received += 1
+ content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
+ response_chunks.append(content)
+
+ # Record time to first token
+ if not first_token_received and content:
+ first_token_received = True
+ time_to_first_token = (time.time() * 1000) - start_time
+ span.set_data("response.time_to_first_token_ms", time_to_first_token)
+
+ # Record final metrics after stream completes
+ total_request_time = (time.time() * 1000) - start_time
+
+ span.set_data("response.total_time_ms", total_request_time)
+ span.set_data("response.format", "text")
+ span.set_data("response.tokens_received", tokens_received)
+
+ return jsonify({
+ "response": "".join(response_chunks),
+ "tokens": tokens_received
+ })
+
+ except Exception as error:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("error.type", error.__class__.__name__)
+ span.set_data("error.message", str(error))
+ return jsonify({"error": str(error)}), 500
+```
+
+**Server Application Instrumentation:**
+```python
+# LLM processing service (e.g., in a separate microservice)
+import sentry_sdk
+import time
+import openai
+
+def process_llm_request(request_data):
+ with sentry_sdk.start_span(
+ op="llm",
+ name="Generate Text"
+ ) as span:
+ start_time = int(time.time() * 1000) # Current time in milliseconds
+
+ try:
+ # Check rate limits before processing
+ rate_limits = check_rate_limits()
+ span.set_data("llm.rate_limit_remaining", rate_limits["remaining"])
+
+ # Prepare the prompt with additional context
+ prepared_prompt = enhance_prompt(request_data["question"])
+
+ # Make the actual API call to the LLM provider
+ response = openai.ChatCompletion.create(
+ model="gpt-4",
+ messages=[{"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": prepared_prompt}],
+ temperature=0.7,
+ max_tokens=4096
+ )
+
+ # Track token usage and performance metrics
+ span.set_data("llm.prompt_tokens", response.usage.prompt_tokens)
+ span.set_data("llm.completion_tokens", response.usage.completion_tokens)
+ span.set_data("llm.total_tokens", response.usage.total_tokens)
+ span.set_data("llm.api_latency_ms", int(time.time() * 1000) - start_time)
+
+ # Calculate and record cost based on token usage
+ cost = calculate_cost(
+ response.usage.prompt_tokens,
+ response.usage.completion_tokens,
+ "gpt-4"
+ )
+ span.set_data("llm.cost_usd", cost)
+
+ return {
+ "response": response.choices[0].message.content,
+ "usage": response.usage
+ }
+
+ except Exception as error:
+ # Track error information
+ span.set_data("error", True)
+ span.set_data("error.type", error.__class__.__name__)
+ span.set_data("error.message", str(error))
+
+ # Check if it's a rate limit error
+ is_rate_limit = "rate_limit" in str(error).lower()
+ span.set_data("error.is_rate_limit", is_rate_limit)
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+
+ raise
+```
+
+**How the Trace Works Together:**
+The client application span captures the initial request handling, while the server span tracks the actual LLM API interaction. The distributed trace shows the complete flow from input to response, enabling you to:
+
+- Analyze end-to-end response times
+- Track costs and token usage patterns
+- Optimize API integration performance
+- Monitor rate limits and service quotas
+- Correlate user inputs with model performance
+
+## E-Commerce Transaction Flow
+
+**Challenge:** Understanding the complete purchase flow and identifying revenue-impacting issues across the application stack.
+
+**Solution:** Track the full transaction process from API request to order fulfillment.
+
+**Client Application Instrumentation:**
+```python
+# Django view handling checkout request
+import sentry_sdk
+import time
+from django.views import View
+from django.http import JsonResponse
+
+class CheckoutView(View):
+ def post(self, request):
+ with sentry_sdk.start_span(op="order", name="Process Order") as span:
+ # Validate the checkout request
+ validation_start = time.time()
+ validation_result = self.validate_checkout_data(request.POST)
+ span.set_data("request.validation_time_ms",
+ (time.time() - validation_start) * 1000)
+
+ if not validation_result["valid"]:
+ span.set_data("request.validation_success", False)
+ span.set_data("request.validation_errors", validation_result["errors"])
+ return JsonResponse({"errors": validation_result["errors"]}, status=400)
+
+ # Process the order
+ try:
+ order_result = self.process_order(request)
+
+ # Update span with order results
+ span.set_data("order.id", order_result["order_id"])
+ span.set_data("order.success", True)
+
+ # Clear the cart and return success
+ request.session["cart"] = []
+ request.session["cart_total"] = 0
+
+ return JsonResponse({"order_id": order_result["order_id"]})
+
+ except Exception as e:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("order.success", False)
+ span.set_data("error.message", str(e))
+ return JsonResponse({"error": str(e)}, status=500)
+```
+
+**Server Application Instrumentation:**
+```python
+# Order processing service
+import sentry_sdk
+import stripe
+import time
+from decimal import Decimal
+
+def process_order(order_data):
+ with sentry_sdk.start_span(
+ op="inventory",
+ name="Check Inventory"
+ ) as span:
+ try:
+ # Check inventory availability
+ inventory_start = time.time()
+ inventory_result = check_inventory(order_data["items"])
+ span.set_data("inventory.check_time_ms",
+ (time.time() - inventory_start) * 1000)
+ span.set_data("inventory.all_available", inventory_result["all_available"])
+
+ if not inventory_result["all_available"]:
+ span.set_data("inventory.unavailable_items",
+ inventory_result["unavailable_items"])
+ raise ValueError("Some items are out of stock")
+
+ # Process payment via Stripe
+ payment_start = time.time()
+ stripe.api_key = "sk_test_..."
+ payment_intent = stripe.PaymentIntent.create(
+ amount=int(Decimal(order_data["total"]) * 100), # Convert to cents
+ currency="usd",
+ payment_method=order_data["payment_method_id"],
+ confirm=True
+ )
+
+ span.set_data("payment.processing_time_ms",
+ (time.time() - payment_start) * 1000)
+ span.set_data("payment.transaction_id", payment_intent.id)
+ span.set_data("payment.success", payment_intent.status == "succeeded")
+
+ # Create fulfillment record
+ fulfillment = create_fulfillment(order_data["order_id"])
+ span.set_data("fulfillment.id", fulfillment["id"])
+ span.set_data("fulfillment.warehouse", fulfillment["warehouse"])
+ span.set_data("fulfillment.shipping_method", fulfillment["shipping_method"])
+ span.set_data("fulfillment.estimated_delivery",
+ fulfillment["estimated_delivery"].isoformat())
+
+ return {
+ "success": True,
+ "order_id": order_data["order_id"],
+ "payment_id": payment_intent.id,
+ "fulfillment_id": fulfillment["id"]
+ }
+
+ except Exception as e:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("error.message", str(e))
+ span.set_data("order.success", False)
+ raise
+```
+
+**How the Trace Works Together:**
+The client application span tracks the initial order request, while the server span handles order processing and fulfillment. The distributed trace provides visibility into the entire purchase flow, allowing you to:
+
+- Analyze transaction performance and success rates
+- Track payment processing timing and errors
+- Monitor inventory availability impact on conversions
+- Measure end-to-end order completion times
+- Identify friction points in the transaction process
+
+## Data Processing Pipeline
+
+**Challenge:** Understanding performance and reliability of distributed data processing pipelines, from job submission through completion.
+
+**Solution:** Comprehensive tracking of job lifecycle across queue management, processing stages, and worker performance.
+
+**Client Application Instrumentation:**
+```python
+# Celery task submission
+import os
+
+import psutil
+import sentry_sdk
+from celery import shared_task
+
+def submit_processing_job(data_file_path, priority="medium"):
+ with sentry_sdk.start_span(op="job", name="Process Job") as span:
+ # Submit job to Celery
+ try:
+ # Configure task and submit
+ task = process_data_file.apply_async(
+ args=[str(data_file_path)],
+ kwargs={"priority": priority},
+ queue="data_processing"
+ )
+
+ span.set_data("job.id", task.id)
+ span.set_data("job.submission_success", True)
+
+ # Start monitoring task progress
+ monitoring_result = setup_task_monitoring(task.id)
+ span.set_data("monitor.callback_url", monitoring_result["callback_url"])
+
+ return {
+ "job_id": task.id,
+ "status": "submitted",
+ "monitoring_url": monitoring_result["status_url"]
+ }
+
+ except Exception as e:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("job.submission_success", False)
+ span.set_data("error.message", str(e))
+ raise
+
+@shared_task(name="tasks.process_data_file")
+def process_data_file(file_path, priority="medium"):
+ with sentry_sdk.start_span(
+ op="process",
+ name="Process Data"
+ ) as span:
+ try:
+ # Processing implementation with stage tracking
+            current_stage = "parse"
+            span.set_data("processing.current_stage", current_stage)
+            data = parse_data_file(file_path)
+
+            current_stage = "transform"
+            span.set_data("processing.current_stage", current_stage)
+            transformed_data = transform_data(data)
+
+            current_stage = "validate"
+            span.set_data("processing.current_stage", current_stage)
+            validation_result = validate_data(transformed_data)
+            span.set_data("validation.errors_count", len(validation_result["errors"]))
+
+            current_stage = "export"
+            span.set_data("processing.current_stage", current_stage)
+            export_result = export_processed_data(transformed_data)
+
+ # Record resource utilization
+ span.set_data("resource.cpu_percent", psutil.cpu_percent())
+ span.set_data("resource.memory_used_mb",
+ psutil.Process().memory_info().rss / (1024 * 1024))
+
+ # Update final job outcome
+ span.set_data("outcome.status", "completed")
+ span.set_data("outcome.records_processed", len(data))
+ span.set_data("outcome.output_size_bytes",
+ os.path.getsize(export_result["output_path"]))
+
+ return {
+ "success": True,
+ "records_processed": len(data),
+ "output_path": export_result["output_path"]
+ }
+
+ except Exception as e:
+ span.set_status(sentry_sdk.SpanStatus.ERROR)
+ span.set_data("outcome.status", "failed")
+ span.set_data("error.message", str(e))
+            span.set_data("error.stage", current_stage)  # stage tracked locally above
+ raise
+```
+
+**How the Trace Works Together:**
+This example shows both the job submission and processing as a single trace, which is common in Celery/distributed task patterns. The spans track the entire job lifecycle, enabling you to:
+
+- Monitor task queue health and processing times
+- Track worker resource utilization
+- Identify bottlenecks in specific processing stages
+- Analyze job scheduling efficiency and queue wait times
+- Monitor data throughput and error rates
+
+For more information about implementing these examples effectively, see our [Sending Span Metrics](/platforms/python/tracing/span-metrics/) guide, which includes detailed best practices and implementation guidelines.
diff --git a/docs/platforms/python/tracing/span-metrics/index.mdx b/docs/platforms/python/tracing/span-metrics/index.mdx
new file mode 100644
index 00000000000000..c2c65875aedb95
--- /dev/null
+++ b/docs/platforms/python/tracing/span-metrics/index.mdx
@@ -0,0 +1,143 @@
+---
+title: Sending Span Metrics
+description: "Learn how to add attributes to spans in Sentry to monitor performance and debug applications."
+sidebar_order: 20
+---
+
+
+
+To use span metrics, you must first configure tracing in your application.
+
+
+
+Span metrics allow you to extend the default metrics that are collected by tracing and track custom performance data and debugging information within your application's traces. There are two main approaches to instrumenting metrics:
+
+1. [Adding metrics to existing spans](#adding-metrics-to-existing-spans)
+2. [Creating dedicated spans with custom metrics](#creating-dedicated-metric-spans)
+
+## Adding Metrics to Existing Spans
+
+You can enhance existing spans with custom metrics by adding data. This is useful when you want to augment automatic instrumentation or add contextual data to spans you've already created.
+
+```python
+import sentry_sdk
+
+span = sentry_sdk.get_current_span()
+if span:
+    # Add individual metrics
+    span.set_data("database.rows_affected", 42)
+    span.set_data("cache.hit_rate", 0.85)
+
+    # Add several related metrics (set_data takes one key-value pair per call)
+    span.set_data("memory.heap_used", 1024000)
+    span.set_data("queue.length", 15)
+    span.set_data("processing.duration_ms", 127)
+```
+
+### Best Practices for Span Data
+
+When adding metrics as span data:
+
+- Use consistent naming conventions (for example, `category.metric_name`)
+- Keep attribute names concise but descriptive
+- Use appropriate data types (string, number, boolean, or an array containing only one of these types)
+
+## Creating Dedicated Metric Spans
+
+For more detailed operations, tasks, or process tracking, you can create custom dedicated spans that focus on specific metrics or attributes that you want to track. This approach provides better discoverability and more precise span configurations; however, it can also create more noise in your trace waterfall.
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_span(
+    op="db.metrics",
+    name="Database Query Metrics"
+) as span:
+    # Set metrics after creating the span (one key-value pair per call)
+    span.set_data("db.query_type", "SELECT")
+    span.set_data("db.table", "users")
+    span.set_data("db.execution_time_ms", 45)
+    span.set_data("db.rows_returned", 100)
+    span.set_data("db.connection_pool_size", 5)
+
+    # Your database operation here
+```
+
+For detailed examples of how to implement span metrics in common scenarios, see our [Example Instrumentation](/platforms/python/tracing/span-metrics/examples/) guide.
+
+## Adding Metrics to All Spans
+
+To consistently add metrics across all spans in your application, you can use the `before_send_transaction` callback:
+
+```python
+def before_send_transaction(event, hint):
+ # Add metrics to the root span
+ if "trace" in event.get("contexts", {}):
+ if "data" not in event["contexts"]["trace"]:
+ event["contexts"]["trace"]["data"] = {}
+
+ event["contexts"]["trace"]["data"].update({
+ "app.version": "1.2.3",
+ "environment.region": "us-west-2"
+ })
+
+ # Add metrics to all child spans
+ for span in event.get("spans", []):
+ if "data" not in span:
+ span["data"] = {}
+
+ span["data"].update({
+ "app.component_version": "2.0.0",
+ "app.deployment_stage": "production"
+ })
+
+ return event
+
+sentry_sdk.init(
+ # Your other Sentry configuration options here
+ before_send_transaction=before_send_transaction
+)
+```
+
+## Best Practices for Span Metrics
+
+1. **Metric Naming**
+ - Use clear, consistent naming patterns
+ - Include the metric category (examples: `db`, `cache`, `http`)
+ - Use snake_case for metric names
+
+2. **Data Types**
+ - Use appropriate numeric types for measurements
+ - Use booleans for status flags
+ - Use strings for categorical data
+ - Use arrays when grouping related values
+
+3. **Performance Considerations**
+ - Consider the overhead of metric collection
+ - Use sampling when collecting high-frequency metrics
+ - Balance metric granularity with system performance
+
+4. **Debugging and Monitoring**
+ - Include correlation IDs for related operations
+ - Add context that helps with troubleshooting
+
+## Best Practices for Implementation
+
+When implementing span metrics in your application:
+
+1. **Start Small and Iterate**
+ - Begin with basic metrics that directly relate to your debugging or performance monitoring needs
+ - Add more detailed tracking as specific debugging needs emerge
+ - Remove metrics that aren't providing actionable insights
+
+2. **Maintain Consistency**
+ - Use consistent naming patterns across your application
+ - Document metric meanings and units in your codebase
+ - Share common metrics across similar operations
+
+3. **Focus on Actionability**
+ - Track metrics that help diagnose specific issues
+ - Consider what alerts or dashboard visualizations you'll want to create
+ - Ensure metrics can drive issue resolution or decision making
+
+For detailed examples of how to implement span metrics in common scenarios, see our [Example Instrumentation](/platforms/python/tracing/span-metrics/examples/) guide.
diff --git a/docs/platforms/python/tracing/instrumentation/performance-metrics.mdx b/docs/platforms/python/tracing/span-metrics/performance-metrics.mdx
similarity index 51%
rename from docs/platforms/python/tracing/instrumentation/performance-metrics.mdx
rename to docs/platforms/python/tracing/span-metrics/performance-metrics.mdx
index 3ec28c7af4f260..409c4b778e6d02 100644
--- a/docs/platforms/python/tracing/instrumentation/performance-metrics.mdx
+++ b/docs/platforms/python/tracing/span-metrics/performance-metrics.mdx
@@ -1,7 +1,9 @@
---
-title: Performance Metrics
-description: "Learn how to attach performance metrics to your transactions."
+title: Sending Performance Metrics
+description: "Learn how to attach performance metrics to your Sentry transactions."
sidebar_order: 20
+notSupported:
+ - javascript.cordova
---
Sentry's SDKs support sending performance metrics data to Sentry. These are numeric values attached to transactions that are aggregated and displayed in Sentry.
@@ -24,4 +26,39 @@ Sentry supports adding arbitrary custom units, but we recommend using one of the
## Supported Measurement Units
-
+Units augment measurement values by giving meaning to what otherwise might be abstract numbers. Adding units also allows Sentry to offer controls - unit conversions, filters, and so on - based on those units. For values that are unitless, you can supply an empty string or `none`.
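+
+For example, a measurement with an explicit unit can be attached like this (a sketch using the SDK's `set_measurement` API):
+
+```python
+import sentry_sdk
+
+with sentry_sdk.start_transaction(name="Import Job", op="task") as transaction:
+    # Numeric values with explicit units
+    transaction.set_measurement("memory_used", 123, "megabyte")
+    transaction.set_measurement("cache_hit_ratio", 0.85, "ratio")
+    # A unitless value
+    transaction.set_measurement("batch_count", 7, "none")
+```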
+
+### Duration Units
+
+- `nanosecond`
+- `microsecond`
+- `millisecond`
+- `second`
+- `minute`
+- `hour`
+- `day`
+- `week`
+
+### Information Units
+
+- `bit`
+- `byte`
+- `kilobyte`
+- `kibibyte`
+- `megabyte`
+- `mebibyte`
+- `gigabyte`
+- `gibibyte`
+- `terabyte`
+- `tebibyte`
+- `petabyte`
+- `pebibyte`
+- `exabyte`
+- `exbibyte`
+
+### Fraction Units
+
+- `ratio`
+- `percent`
+
+If you want to explore further, you can find details about supported units in our [event ingestion documentation](https://getsentry.github.io/relay/relay_metrics/enum.MetricUnit.html).
diff --git a/docs/platforms/python/tracing/troubleshooting/index.mdx b/docs/platforms/python/tracing/troubleshooting/index.mdx
new file mode 100644
index 00000000000000..29763dc64e9353
--- /dev/null
+++ b/docs/platforms/python/tracing/troubleshooting/index.mdx
@@ -0,0 +1,92 @@
+---
+title: Troubleshooting
+description: "Learn how to troubleshoot your tracing setup."
+sidebar_order: 60
+---
+
+If you're having trouble with your tracing setup, the sections below cover some common issues. If you need additional help, you can ask on GitHub. Customers on a paid plan may also contact support.
+
+## Group Transactions
+
+When Sentry captures transactions, they are assigned a transaction name. This name is generally auto-generated by the Sentry SDK based on the framework integrations you are using. If you can't leverage the automatic transaction generation (or want to customize how transaction names are generated), you can use the scope API to set transaction names or use a custom event processor.
+
+For example, to set a transaction name:
+
+```python
+import sentry_sdk
+
+# Setting the transaction name directly on the current scope
+scope = sentry_sdk.get_current_scope()
+scope.set_transaction_name("UserListView")
+```
+
+You can define a custom `before_send_transaction` callback to modify transaction names:
+
+```python
+import sentry_sdk
+from sentry_sdk.integrations.django import DjangoIntegration
+
+def transaction_processor(event, hint):
+ if event.get("type") == "transaction":
+ # Extract path from transaction name
+ transaction_name = event.get("transaction", "")
+
+ # Remove variable IDs from URLs to reduce cardinality
+ if "/user/" in transaction_name:
+ # Convert /user/123/ to /user/:id/
+ import re
+ event["transaction"] = re.sub(r'/user/\d+/', '/user/:id/', transaction_name)
+
+ return event
+
+sentry_sdk.init(
+ dsn="your-dsn",
+ integrations=[DjangoIntegration()],
+ traces_sample_rate=1.0,
+ # Add your event processor during SDK initialization
+ before_send_transaction=transaction_processor,
+)
+```
+
+## Control Data Truncation
+
+Currently, every tag has a maximum character limit of 200 characters. Tags over the 200 character limit will become truncated, losing potentially important information. To retain this data, you can split data over several tags instead.
+
+For example, a 200+ character tag like this:
+
+`https://empowerplant.io/api/0/projects/ep/setup_form/?user_id=314159265358979323846264338327&tracking_id=EasyAsABC123OrSimpleAsDoReMi&product_name=PlantToHumanTranslator&product_id=161803398874989484820458683436563811772030917980576`
+
+...will become truncated to:
+
+`https://empowerplant.io/api/0/projects/ep/setup_form/?user_id=314159265358979323846264338327&tracking_id=EasyAsABC123OrSimpleAsDoReMi&product_name=PlantToHumanTranslator&product_id=1618033988749894848`
+
+Using `span.set_tag` for shorter values, in combination with `span.set_data` for the larger payload, maintains the details:
+
+```python
+import sentry_sdk
+
+# ...
+
+base_url = "https://empowerplant.io"
+endpoint = "/api/0/projects/ep/setup_form"
+parameters = {
+ "user_id": 314159265358979323846264338327,
+ "tracking_id": "EasyAsABC123OrSimpleAsDoReMi",
+ "product_name": "PlantToHumanTranslator",
+ "product_id": 161803398874989484820458683436563811772030917980576,
+}
+
+with sentry_sdk.start_span(op="request", name="setup form") as span:
+ span.set_tag("base_url", base_url)
+ span.set_tag("endpoint", endpoint)
+ span.set_data("parameters", parameters)
+ make_request(
+        "{base_url}{endpoint}/".format(
+ base_url=base_url,
+ endpoint=endpoint,
+ ),
+ data=parameters
+ )
+
+ # ...
+```
diff --git a/platform-includes/performance/traces-sampler-as-sampler/python.mdx b/platform-includes/performance/traces-sampler-as-sampler/python.mdx
index 73b9b2e37ad166..e12cf7aed16a32 100644
--- a/platform-includes/performance/traces-sampler-as-sampler/python.mdx
+++ b/platform-includes/performance/traces-sampler-as-sampler/python.mdx
@@ -4,6 +4,13 @@ def traces_sampler(sampling_context):
# along with anything in the global namespace to compute the sample rate
# or sampling decision for this transaction
+ # Use the parent sampling decision if we have an incoming trace.
+ # Note: we strongly recommend respecting the parent sampling decision,
+ # as this ensures your traces will be complete!
+ parent_sampling_decision = sampling_context.get("parent_sampled")
+ if parent_sampling_decision is not None:
+ return float(parent_sampling_decision)
+
if "...":
# These are important - take a big sample
return 0.5
diff --git a/redirects.js b/redirects.js
index 47c501e1e99be2..606ec8f626ab1f 100644
--- a/redirects.js
+++ b/redirects.js
@@ -994,10 +994,14 @@ const userDocsRedirects = [
{
source: '/platforms/kotlin-multiplatform/:path*',
destination: '/platforms/kotlin/guides/kotlin-multiplatform/:path*',
+ },
+ {
+ source: '/platforms/python/tracing/trace-propagation/:path*',
+ destination: '/platforms/python/tracing/distributed-tracing/:path*',
},
{
- source: '/platforms/python/:productfeature/troubleshooting/:path*',
- destination: '/platforms/python/troubleshooting/:path*',
+ source: '/platforms/python/tracing/distributed-tracing/custom-instrumentation',
+ destination: '/platforms/python/tracing/distributed-tracing/custom-trace-propagation',
},
{
source: '/platforms/ruby/guides/:guide/:productfeature/troubleshooting/:path*',