Skip to content

Commit 06d083f

Browse files
committed
docs(examples): add examples for streaming conversations
1 parent 63b0a22 commit 06d083f

File tree

2 files changed

+267
-0
lines changed

2 files changed

+267
-0
lines changed
Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
import asyncio
2+
import os
3+
import sys
4+
5+
curr_dir = os.path.dirname(os.path.realpath(__file__))
6+
repo_root = os.path.abspath(os.path.join(curr_dir, os.pardir))
7+
sys.path.insert(1, os.path.join(repo_root, "src"))
8+
9+
import typesense
10+
11+
from typesense.types.document import MessageChunk, StreamConfigBuilder
12+
13+
14+
def require_env(name: str) -> str:
    """Return the value of environment variable *name*, failing loudly if unset.

    Raises:
        RuntimeError: if the variable is missing or set to an empty string.
    """
    found = os.environ.get(name)
    if found:
        return found
    raise RuntimeError(f"Missing required environment variable: {name}")
19+
20+
21+
async def main() -> None:
    """End-to-end demo of Typesense conversational search with streaming.

    Creates a conversation-history collection, a docs collection with
    server-side OpenAI embeddings, and a conversation model, then runs one
    streaming conversational search, printing each chunk as it arrives.
    The Typesense HTTP client is always closed on exit.
    """
    typesense_api_key = require_env("TYPESENSE_API_KEY")
    openai_api_key = require_env("OPENAI_API_KEY")

    client = typesense.AsyncClient(
        {
            "api_key": typesense_api_key,
            "nodes": [
                {
                    "host": "localhost",
                    "port": "8108",
                    "protocol": "http",
                }
            ],
            "connection_timeout_seconds": 10,
        }
    )

    try:
        # Best-effort teardown of anything left over from a previous run.
        leftovers = [
            client.conversations_models["conv-model-1"],
            client.collections["streaming_docs"],
            client.collections["conversation_store"],
        ]
        for resource in leftovers:
            try:
                await resource.delete()
            except Exception:
                pass  # fine if the resource did not exist yet

        # Collection that stores the conversation history for the model.
        await client.collections.create(
            {
                "name": "conversation_store",
                "fields": [
                    {"name": "conversation_id", "type": "string"},
                    {"name": "model_id", "type": "string"},
                    {"name": "timestamp", "type": "int32"},
                    {"name": "role", "type": "string", "index": False},
                    {"name": "message", "type": "string", "index": False},
                ],
            }
        )

        # Docs collection with auto-embedding of `title` via OpenAI.
        await client.collections.create(
            {
                "name": "streaming_docs",
                "fields": [
                    {"name": "title", "type": "string"},
                    {
                        "name": "embedding",
                        "type": "float[]",
                        "embed": {
                            "from": ["title"],
                            "model_config": {
                                "model_name": "openai/text-embedding-3-small",
                                "api_key": openai_api_key,
                            },
                        },
                    },
                ],
            }
        )

        # Seed a couple of documents to search over.
        for doc in (
            {"id": "stream-1", "title": "Company profile: a developer tools firm."},
            {"id": "stream-2", "title": "Internal memo about quarterly planning."},
        ):
            await client.collections["streaming_docs"].documents.create(doc)

        conversation_model = await client.conversations_models.create(
            {
                "id": "conv-model-1",
                "model_name": "openai/gpt-3.5-turbo",
                "history_collection": "conversation_store",
                "api_key": openai_api_key,
                "system_prompt": (
                    "You are an assistant for question-answering. "
                    "Only use the provided context. Add some fluff about you Being an assistant built for Typesense Conversational Search and a brief overview of how it works"
                ),
                "max_bytes": 16384,
            }
        )

        # Register streaming callbacks on a fresh stream configuration.
        stream = StreamConfigBuilder()

        @stream.on_chunk
        def handle_chunk(chunk: MessageChunk) -> None:
            # Emit each partial message immediately, without buffering.
            print(chunk["message"], end="", flush=True)

        @stream.on_complete
        def handle_complete(response: dict) -> None:
            print("\n---\nComplete response keys:", response.keys())

        # One conversational search with streaming enabled.
        await client.collections["streaming_docs"].documents.search(
            {
                "q": "What is this document about?",
                "query_by": "embedding",
                "exclude_fields": "embedding",
                "conversation": True,
                "prefix": False,
                "conversation_stream": True,
                "conversation_model_id": conversation_model["id"],
                "stream_config": stream,
            }
        )
    finally:
        await client.api_call.aclose()
133+
134+
135+
if __name__ == "__main__":
    # Drive the async example with a fresh event loop; no-op when imported.
    asyncio.run(main())

examples/conversation_streaming.py

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
import os
import sys

# Make the in-repo `src/` package importable when the example is run
# directly from a checkout (before any globally installed `typesense`).
curr_dir = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.abspath(os.path.join(curr_dir, os.pardir))
sys.path.insert(1, os.path.join(repo_root, "src"))

import typesense

from typesense.types.document import MessageChunk, StreamConfigBuilder
12+
13+
14+
def require_env(name: str) -> str:
    """Fetch a mandatory environment variable.

    Raises:
        RuntimeError: when the variable is absent or blank.
    """
    value = os.environ.get(name, "")
    if value:
        return value
    raise RuntimeError(f"Missing required environment variable: {name}")
19+
20+
21+
# --- Connection -----------------------------------------------------------

typesense_api_key = require_env("TYPESENSE_API_KEY")
openai_api_key = require_env("OPENAI_API_KEY")

client = typesense.Client(
    {
        "api_key": typesense_api_key,
        "nodes": [
            {
                "host": "localhost",
                "port": "8108",
                "protocol": "http",
            }
        ],
        "connection_timeout_seconds": 10,
    }
)

# --- Reset state left over from previous runs (ignore "not found") --------

for stale in (
    client.conversations_models["conv-model-1"],
    client.collections["streaming_docs"],
    client.collections["conversation_store"],
):
    try:
        stale.delete()
    except Exception:
        pass

# --- Schemas --------------------------------------------------------------

# Collection that stores the conversation history for the model.
history_schema = {
    "name": "conversation_store",
    "fields": [
        {"name": "conversation_id", "type": "string"},
        {"name": "model_id", "type": "string"},
        {"name": "timestamp", "type": "int32"},
        {"name": "role", "type": "string", "index": False},
        {"name": "message", "type": "string", "index": False},
    ],
}

# Docs collection with auto-embedding of `title` via OpenAI.
docs_schema = {
    "name": "streaming_docs",
    "fields": [
        {"name": "title", "type": "string"},
        {
            "name": "embedding",
            "type": "float[]",
            "embed": {
                "from": ["title"],
                "model_config": {
                    "model_name": "openai/text-embedding-3-small",
                    "api_key": openai_api_key,
                },
            },
        },
    ],
}

client.collections.create(history_schema)
client.collections.create(docs_schema)

# --- Seed a couple of documents to search over ----------------------------

for doc in (
    {"id": "stream-1", "title": "Company profile: a developer tools firm."},
    {"id": "stream-2", "title": "Internal memo about a quarterly planning meeting."},
):
    client.collections["streaming_docs"].documents.create(doc)

# --- Conversation model ---------------------------------------------------

conversation_model = client.conversations_models.create(
    {
        "id": "conv-model-1",
        "model_name": "openai/gpt-3.5-turbo",
        "history_collection": "conversation_store",
        "api_key": openai_api_key,
        "system_prompt": (
            "You are an assistant for question-answering. "
            "Only use the provided context. Add some fluff about you Being an assistant built for Typesense Conversational Search and a brief overview of how it works"
        ),
        "max_bytes": 16384,
    }
)

# --- Streaming callbacks --------------------------------------------------

stream = StreamConfigBuilder()


@stream.on_chunk
def handle_chunk(chunk: MessageChunk) -> None:
    # Emit each partial message immediately, without buffering.
    print(chunk["message"], end="", flush=True)


@stream.on_complete
def handle_complete(response: dict) -> None:
    print("\n---\nComplete response keys:", response.keys())


# --- One conversational search with streaming enabled ---------------------

client.collections["streaming_docs"].documents.search(
    {
        "q": "What is this document about?",
        "query_by": "embedding",
        "exclude_fields": "embedding",
        "conversation": True,
        "prefix": False,
        "conversation_stream": True,
        "conversation_model_id": conversation_model["id"],
        "stream_config": stream,
    }
)

0 commit comments

Comments
 (0)