Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 19 additions & 17 deletions apps/docs/integrations/openai.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,9 @@ import { withSupermemory } from "@supermemory/tools/openai"
const openai = new OpenAI()

// Wrap client with memory - memories auto-injected into system prompts
const client = withSupermemory(openai, "user-123", {
const client = withSupermemory(openai, {
containerTag: "user-123",
customId: "conversation-456",
mode: "full", // "profile" | "query" | "full"
addMemory: "always", // "always" | "never"
})
Expand All @@ -62,21 +64,17 @@ const response = await client.chat.completions.create({
### Configuration Options

```typescript
const client = withSupermemory(openai, "user-123", {
// Memory search mode
mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)

// Auto-save conversations as memories
addMemory: "always", // "always" | "never"

// Group messages into conversations
conversationId: "conv-456",

// Enable debug logging
verbose: true,

// Custom API endpoint
baseUrl: "https://custom.api.com"
const client = withSupermemory(openai, {
// Required options
containerTag: "user-123", // Scopes memories to this user
customId: "conversation-456", // Groups messages into conversations

  // Optional settings
mode: "full", // "profile" (user profile only), "query" (search only), "full" (both)
addMemory: "always", // "always" | "never" - auto-save conversations as memories
verbose: true, // Enable debug logging
apiKey: "sm_...", // Supermemory API key (or use SUPERMEMORY_API_KEY env var)
baseUrl: "https://custom.api.com" // Custom API endpoint
})
```

Expand All @@ -91,7 +89,11 @@ const client = withSupermemory(openai, "user-123", {
### Works with Responses API Too

```typescript
const client = withSupermemory(openai, "user-123", { mode: "full" })
const client = withSupermemory(openai, {
containerTag: "user-123",
customId: "conversation-456",
mode: "full",
})

// Memories injected into instructions
const response = await client.responses.create({
Expand Down
98 changes: 70 additions & 28 deletions packages/openai-sdk-python/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ The easiest way to add memory capabilities to your OpenAI client is using the `w
```python
import asyncio
from openai import AsyncOpenAI
from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions
from supermemory_openai import with_supermemory, SupermemoryOpenAIOptions

async def main():
# Create OpenAI client
Expand All @@ -44,8 +44,9 @@ async def main():
# Wrap with Supermemory middleware
openai_with_memory = with_supermemory(
openai,
container_tag="user-123", # Unique identifier for user's memories
options=OpenAIMiddlewareOptions(
SupermemoryOpenAIOptions(
container_tag="user-123", # Unique identifier for user's memories
custom_id="chat-session-1", # Required: groups messages into conversations
mode="full", # "profile", "query", or "full"
verbose=True, # Enable logging
add_memory="always" # Automatically save conversations
Expand Down Expand Up @@ -118,11 +119,17 @@ The middleware also works with synchronous OpenAI clients:

```python
from openai import OpenAI
from supermemory_openai import with_supermemory
from supermemory_openai import with_supermemory, SupermemoryOpenAIOptions

# Sync client
openai = OpenAI(api_key="your-openai-api-key")
openai_with_memory = with_supermemory(openai, "user-123")
openai_with_memory = with_supermemory(
openai,
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1"
)
)

# Works the same way
response = openai_with_memory.chat.completions.create(
Expand All @@ -137,12 +144,24 @@ response = openai_with_memory.chat.completions.create(

```python
# Async context manager (recommended)
async with with_supermemory(openai, "user-123") as client:
async with with_supermemory(
openai,
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1"
)
) as client:
response = await client.chat.completions.create(...)
# Background tasks automatically waited for on exit

# Manual cleanup
client = with_supermemory(openai, "user-123")
client = with_supermemory(
openai,
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1"
)
)
response = await client.chat.completions.create(...)
await client.wait_for_background_tasks() # Ensure memory is saved
```
Expand All @@ -159,8 +178,11 @@ Injects all static and dynamic profile memories into every request. Best for mai
```python
openai_with_memory = with_supermemory(
openai,
"user-123",
OpenAIMiddlewareOptions(mode="profile")
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1",
mode="profile"
)
)
```

Expand All @@ -170,8 +192,11 @@ Only searches for memories relevant to the current user message. More efficient
```python
openai_with_memory = with_supermemory(
openai,
"user-123",
OpenAIMiddlewareOptions(mode="query")
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1",
mode="query"
)
)
```

Expand All @@ -181,8 +206,11 @@ Combines both profile and query modes - includes all profile memories plus relev
```python
openai_with_memory = with_supermemory(
openai,
"user-123",
OpenAIMiddlewareOptions(mode="full")
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1",
mode="full"
)
)
```

Expand All @@ -192,22 +220,30 @@ Control when conversations are automatically saved as memories:

```python
# Always save conversations as memories
OpenAIMiddlewareOptions(add_memory="always")
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1",
add_memory="always"
)

# Never save conversations (default)
OpenAIMiddlewareOptions(add_memory="never")
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1",
add_memory="never"
)
```

### Complete Configuration Example

```python
from supermemory_openai import with_supermemory, OpenAIMiddlewareOptions
from supermemory_openai import with_supermemory, SupermemoryOpenAIOptions

openai_with_memory = with_supermemory(
openai_client,
container_tag="user-123",
options=OpenAIMiddlewareOptions(
conversation_id="chat-session-456", # Group messages into conversations
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-456",
verbose=True, # Enable detailed logging
mode="full", # Use both profile and query
add_memory="always" # Auto-save conversations
Expand Down Expand Up @@ -291,25 +327,25 @@ Wraps an OpenAI client with automatic memory injection middleware.
```python
def with_supermemory(
openai_client: Union[OpenAI, AsyncOpenAI],
container_tag: str,
options: Optional[OpenAIMiddlewareOptions] = None
options: SupermemoryOpenAIOptions
) -> Union[OpenAI, AsyncOpenAI]
```

**Parameters:**
- `openai_client`: OpenAI or AsyncOpenAI client instance
- `options`: Configuration options (see `SupermemoryOpenAIOptions`); includes the required `container_tag`, a unique identifier for memory storage (e.g., user ID)

#### `OpenAIMiddlewareOptions`
#### `SupermemoryOpenAIOptions`

Configuration dataclass for middleware behavior.

```python
@dataclass
class OpenAIMiddlewareOptions:
conversation_id: Optional[str] = None # Group messages into conversations
verbose: bool = False # Enable detailed logging
class SupermemoryOpenAIOptions:
container_tag: str # Required: unique identifier for memory storage
custom_id: str # Required: groups messages into conversations
api_key: Optional[str] = None # Supermemory API key (or use env var)
verbose: bool = False # Enable detailed logging
mode: Literal["profile", "query", "full"] = "profile" # Memory injection mode
add_memory: Literal["always", "never"] = "never" # Auto-save behavior
```
Expand Down Expand Up @@ -349,7 +385,13 @@ from supermemory_openai import (

try:
# This will raise SupermemoryConfigurationError if API key is missing
client = with_supermemory(openai_client, "user-123")
client = with_supermemory(
openai_client,
SupermemoryOpenAIOptions(
container_tag="user-123",
custom_id="chat-session-1"
)
)

response = await client.chat.completions.create(
messages=[{"role": "user", "content": "Hello"}],
Expand Down
2 changes: 1 addition & 1 deletion packages/openai-sdk-python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "supermemory-openai-sdk"
version = "1.0.2"
version = "2.0.0"
description = "Memory tools for OpenAI function calling with supermemory"
readme = "README.md"
license = "MIT"
Expand Down
4 changes: 2 additions & 2 deletions packages/openai-sdk-python/src/supermemory_openai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

from .middleware import (
with_supermemory,
OpenAIMiddlewareOptions,
SupermemoryOpenAIOptions,
SupermemoryOpenAIWrapper,
)

Expand Down Expand Up @@ -58,7 +58,7 @@
"create_add_memory_tool",
# Middleware
"with_supermemory",
"OpenAIMiddlewareOptions",
"SupermemoryOpenAIOptions",
"SupermemoryOpenAIWrapper",
# Utils
"Logger",
Expand Down
Loading
Loading