Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "3.36.0"
".": "3.37.0"
}
13 changes: 13 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,18 @@
# Changelog

## 3.37.0 (2026-04-28)

Full Changelog: [v3.36.0...v3.37.0](https://github.com/supermemoryai/python-sdk/compare/v3.36.0...v3.37.0)

### Features

* support setting headers via env ([9d86a3c](https://github.com/supermemoryai/python-sdk/commit/9d86a3c975b1656c9d32816580557207b771ae30))


### Bug Fixes

* use correct field name format for multipart file arrays ([a94f7a1](https://github.com/supermemoryai/python-sdk/commit/a94f7a1c6bcae8d9d0c0b5d33b895efbcdf1a123))

## 3.36.0 (2026-04-25)

Full Changelog: [v3.35.0...v3.36.0](https://github.com/supermemoryai/python-sdk/compare/v3.35.0...v3.36.0)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "supermemory"
version = "3.36.0"
version = "3.37.0"
description = "The official Python library for the supermemory API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
19 changes: 19 additions & 0 deletions src/supermemory/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
)
from ._utils import (
is_given,
is_mapping_t,
maybe_transform,
get_async_library,
async_maybe_transform,
Expand Down Expand Up @@ -114,6 +115,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.supermemory.ai"

custom_headers_env = os.environ.get("SUPERMEMORY_CUSTOM_HEADERS")
if custom_headers_env is not None:
parsed: dict[str, str] = {}
for line in custom_headers_env.split("\n"):
colon = line.find(":")
if colon >= 0:
parsed[line[:colon].strip()] = line[colon + 1 :].strip()
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Medium] Malformed header lines produce empty header names instead of failing fast

A line like ': bad' passes the colon >= 0 check and inserts "" (empty string) as a header name. This doesn't raise at construction time — it propagates into the request and causes a generic runtime error later, making the root cause hard to diagnose.

Add a guard to skip (or raise) when the header name is empty after stripping:

name = line[:colon].strip()
if not name:
    continue  # or raise ValueError(f"Invalid header line: {line!r}")
parsed[name] = line[colon + 1:].strip()

default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
Comment on lines +118 to +125
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[High] Env-supplied headers can silently override the SDK's Authorization header

parsed is placed at the front of the merge ({**parsed, **(default_headers or {})}), but default_headers are applied before auth_headers in the base client. This means setting SUPERMEMORY_CUSTOM_HEADERS='Authorization: Bearer evil' will replace the bearer token derived from api_key, redirecting all requests to the wrong credentials without any warning.

Consider stripping or blocking reserved headers (Authorization, X-Api-Key, etc.) from parsed before merging, or placing parsed after auth_headers in the resolution order so auth always wins.


super().__init__(
version=__version__,
base_url=base_url,
Expand Down Expand Up @@ -440,6 +450,15 @@ def __init__(
if base_url is None:
base_url = f"https://api.supermemory.ai"

custom_headers_env = os.environ.get("SUPERMEMORY_CUSTOM_HEADERS")
if custom_headers_env is not None:
parsed: dict[str, str] = {}
for line in custom_headers_env.split("\n"):
colon = line.find(":")
if colon >= 0:
parsed[line[:colon].strip()] = line[colon + 1 :].strip()
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[Medium] Same empty header name issue in AsyncSupermemory

Identical to the sync client: malformed lines (e.g. ': bad') produce an empty header name that propagates silently into requests. Same guard needed here.

default_headers = {**parsed, **(default_headers if is_mapping_t(default_headers) else {})}
Comment on lines +453 to +460
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[High] Same Authorization override issue as in Supermemory (sync client)

Identical logic in `AsyncSupermemory.__init__` — `SUPERMEMORY_CUSTOM_HEADERS` can silently replace the bearer token. Same fix applies: block reserved auth headers or ensure auth_headers always takes precedence.


super().__init__(
version=__version__,
base_url=base_url,
Expand Down
8 changes: 2 additions & 6 deletions src/supermemory/_qs.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,13 @@

from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
from typing_extensions import Literal, get_args
from typing_extensions import get_args

from ._types import NotGiven, not_given
from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten

_T = TypeVar("_T")


ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
NestedFormat = Literal["dots", "brackets"]

PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
Expand Down
3 changes: 3 additions & 0 deletions src/supermemory/_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")

ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
NestedFormat = Literal["dots", "brackets"]


# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
Expand Down
42 changes: 34 additions & 8 deletions src/supermemory/_utils/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@
)
from pathlib import Path
from datetime import date, datetime
from typing_extensions import TypeGuard
from typing_extensions import TypeGuard, get_args

import sniffio

from .._types import Omit, NotGiven, FileTypes, HeadersLike
from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike

_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
Expand All @@ -40,25 +40,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.

A path may look like this ['foo', 'files', '<array>', 'data'].

``array_format`` controls how ``<array>`` segments contribute to the emitted
field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).

Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
files.extend(_extract_items(query, path, index=0, flattened_key=None))
files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files


def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
if array_format == "brackets":
return "[]"
if array_format == "indices":
return f"[{array_index}]"
if array_format == "repeat" or array_format == "comma":
# Both repeat the bare field name for each file part; there is no
# meaningful way to comma-join binary parts.
return ""
raise NotImplementedError(
f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
)


def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
Expand All @@ -75,9 +95,11 @@ def _extract_items(

if is_list(obj):
files: list[tuple[str, FileTypes]] = []
for entry in obj:
assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
files.append((flattened_key + "[]", cast(FileTypes, entry)))
for array_index, entry in enumerate(obj):
suffix = _array_suffix(array_format, array_index)
emitted_key = (flattened_key + suffix) if flattened_key else suffix
assert_is_file_content(entry, key=emitted_key)
files.append((emitted_key, cast(FileTypes, entry)))
return files

assert_is_file_content(obj, key=flattened_key)
Expand Down Expand Up @@ -106,6 +128,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
array_format=array_format,
)
elif is_list(obj):
if key != "<array>":
Expand All @@ -117,9 +140,12 @@ def _extract_items(
item,
path,
index=index,
flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
flattened_key=(
(flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
),
array_format=array_format,
)
for item in obj
for array_index, item in enumerate(obj)
]
)

Expand Down
2 changes: 1 addition & 1 deletion src/supermemory/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "supermemory"
__version__ = "3.36.0" # x-release-please-version
__version__ = "3.37.0" # x-release-please-version
28 changes: 23 additions & 5 deletions tests/test_extract_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

import pytest

from supermemory._types import FileTypes
from supermemory._types import FileTypes, ArrayFormat
from supermemory._utils import extract_files


Expand Down Expand Up @@ -37,10 +37,7 @@ def test_multiple_files() -> None:

def test_top_level_file_array() -> None:
query = {"files": [b"file one", b"file two"], "title": "hello"}
assert extract_files(query, paths=[["files", "<array>"]]) == [
("files[]", b"file one"),
("files[]", b"file two"),
]
assert extract_files(query, paths=[["files", "<array>"]]) == [("files[]", b"file one"), ("files[]", b"file two")]
assert query == {"title": "hello"}


Expand Down Expand Up @@ -71,3 +68,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected


@pytest.mark.parametrize(
    "array_format,expected_top_level,expected_nested",
    [
        ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
        ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
        ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
        ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
    ],
)
def test_array_format_controls_file_field_names(
    array_format: ArrayFormat,
    expected_top_level: list[tuple[str, FileTypes]],
    expected_nested: list[tuple[str, FileTypes]],
) -> None:
    """Each ``array_format`` value drives the multipart field names emitted for ``<array>`` segments."""
    flat_query = {"files": [b"a", b"b"]}
    nested_query = {"items": [{"file": b"a"}, {"file": b"b"}]}

    assert extract_files(flat_query, paths=[["files", "<array>"]], array_format=array_format) == expected_top_level
    assert (
        extract_files(nested_query, paths=[["items", "<array>", "file"]], array_format=array_format)
        == expected_nested
    )
2 changes: 1 addition & 1 deletion tests/test_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
copied = deepcopy_with_paths(original, [["items", "<array>", "file"]])
extracted = extract_files(copied, paths=[["items", "<array>", "file"]])

assert extracted == [("items[][file]", file1), ("items[][file]", file2)]
assert [entry for _, entry in extracted] == [file1, file2]
assert original == {
"items": [
{"file": file1, "extra": 1},
Expand Down