2 changes: 1 addition & 1 deletion .github/workflows/run-tests.yml
@@ -14,7 +14,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        python-version: ["3.10", "3.11", "3.12"]
+        python-version: ["3.10", "3.11", "3.12", "3.13"]
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

16 changes: 14 additions & 2 deletions README.md
@@ -10,13 +10,13 @@

<h4 align="center">
<a href="https://pypi.org/project/clonellm/" target="_blank">
<img src="https://img.shields.io/badge/release-v0.3.0-green" alt="Latest Release">
<img src="https://img.shields.io/badge/release-v0.4.0-green" alt="Latest Release">
</a>
<a href="https://pypi.org/project/clonellm/" target="_blank">
<img src="https://img.shields.io/pypi/v/clonellm.svg" alt="PyPI Version">
</a>
<a target="_blank">
<img src="https://img.shields.io/badge/python-3.10%20%7C%203.11%20%7C%203.12-blue" alt="Python Versions">
<img src="https://img.shields.io/badge/python-3.10%20%7C%203.11%20%7C%203.12%20%7C%203.13-blue" alt="Python Versions">
</a>
<a target="_blank">
<img src="https://img.shields.io/pypi/l/clonellm" alt="PyPI License">
@@ -247,6 +247,18 @@
clone.clear_memory()
# clone.reset_memory()
```

+### Additional system prompts
+You can pass additional system prompts (instructions) to the clone to guide its behavior.
+```python
+from clonellm import CloneLLM
+
+clone = CloneLLM(
+    model="gpt-4o",
+    documents=documents,
+    system_prompts=["Keep your responses brief and concise, and always respond in first person."],
+)
+```
+
### Streaming
CloneLLM supports streaming responses from the LLM, allowing for real-time processing of text as it is being generated, rather than receiving the whole output at once.
```python
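# Reconstructed for readability: the diff view truncates the original
# snippet here. Assumes a fitted clone and the `stream` method this
# section describes; chunks are printed as they arrive.
for chunk in clone.stream("Tell me about yourself"):
    print(chunk, end="", flush=True)
```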
1 change: 1 addition & 0 deletions examples/advanced_clone.py
@@ -64,6 +64,7 @@ def main() -> None:
        vector_store=RagVectorStore.Chroma,
        user_profile=profile,
        memory=MAX_MEMORY_SIZE,
+        system_prompts=["Keep your responses brief and concise, and always respond in first person."],
        request_timeout=5,
        temperature=0.3,
        max_tokens=256,
1 change: 1 addition & 0 deletions examples/advanced_clone_async.py
@@ -65,6 +65,7 @@ async def main() -> None:
        vector_store=RagVectorStore.Chroma,
        user_profile=profile,
        memory=MAX_MEMORY_SIZE,
+        system_prompts=["Keep your responses brief and concise, and always respond in first person."],
        request_timeout=5,
        temperature=0.5,
        max_tokens=256,
27 changes: 19 additions & 8 deletions poetry.lock

Some generated files are not rendered by default.

5 changes: 3 additions & 2 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "clonellm"
version = "0.3.0"
version = "0.4.0"
description = "Python package to create an AI clone of yourself using LLMs."
packages = [{ from = "src", include = "clonellm" }]
include = ["src/clonellm/py.typed"]
@@ -15,10 +15,11 @@ keywords = ["python", "ai", "llm", "rag"]
repository = "https://github.com/msamsami/clonellm"

[tool.poetry.dependencies]
python = ">=3.10,<3.13"
python = ">=3.10,<3.14"
litellm = "^1.36.0"
langchain = "^0.1.17"
pydantic = {version = ">=2.8.0", python = ">=3.12.4"}
+legacy-cgi = {version = ">=2.6.2", python = ">=3.13"}
langchain-chroma = {version = "*", optional = true}
faiss-cpu = {version = "*", optional = true}

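The new `legacy-cgi` entry exists because Python 3.13 removed the long-deprecated `cgi` module from the standard library (PEP 594); the backport keeps transitive imports working on 3.13+. A minimal illustration (not project code):

```python
# On Python 3.13+ the stdlib no longer ships `cgi` (PEP 594). With the
# legacy-cgi backport installed, this import resolves to the drop-in
# replacement, so dependencies that still rely on it keep working.
import cgi  # noqa: F401
```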
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,3 +1,4 @@
litellm>=1.36.0,<2.0.0
langchain>=0.1.17,<1.0.0
pydantic>=2.8.0; python_version >= '3.12.4'
+legacy-cgi>=2.6.2; python_version >= '3.13'
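The `python_version >= '3.13'` suffix is a PEP 508 environment marker, so pip only installs `legacy-cgi` on interpreters that need it. A quick way to check how such a marker evaluates locally (a sketch using the third-party `packaging` library, not part of this PR):

```python
from packaging.markers import Marker

# Evaluates the marker against the running interpreter's environment;
# prints True only on Python 3.13 or newer.
print(Marker("python_version >= '3.13'").evaluate())
```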
3 changes: 2 additions & 1 deletion setup.py
@@ -36,9 +36,10 @@ def get_requirements(filename: str) -> list[str]:
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"License :: OSI Approved :: MIT License",
],
python_requires=">=3.10,<3.13",
python_requires=">=3.10,<3.14",
install_requires=get_requirements("requirements.txt"),
extras_require={"chroma": ["langchain-chroma"], "faiss": ["faiss-cpu"]},
)
2 changes: 1 addition & 1 deletion src/clonellm/__init__.py
@@ -1,5 +1,5 @@
__author__ = "Mehdi Samsami"
__version__ = "0.3.0"
__version__ = "0.4.0"

from .core import CloneLLM
from .embed import LiteLLMEmbeddings
14 changes: 10 additions & 4 deletions src/clonellm/_prompt.py
@@ -1,6 +1,8 @@
+from typing import Optional

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

__all__ = ("summarize_context_prompt", "context_prompt", "user_profile_prompt", "history_prompt", "question_prompt")
__all__ = ("summarize_context_prompt", "get_context_prompt", "user_profile_prompt", "history_prompt", "question_prompt")


summarize_context_prompt = ChatPromptTemplate.from_messages(
@@ -13,8 +15,8 @@
)


-context_prompt = ChatPromptTemplate.from_messages(
-    [
+def get_context_prompt(additional_system_prompts: Optional[list[str]] = None) -> ChatPromptTemplate:
+    prompts = [
        ("system", "You are my human clone. Act as I would. Do not ask questions about me. If anyone asks, you are me."),
        ("system", "You are having a conversation with someone who wants to learn more about you."),
        ("system", "You MUST speak in first person. If you do not, you will be terminated."),
@@ -27,10 +29,14 @@
            ),
        ),
        ("system", "Use the personality traits and communication samples (if provided) to know how to craft your responses."),
+    ]
+    prompts += [("system", prompt) for prompt in additional_system_prompts or []]
+    prompts += [
        ("system", "Here is some relevant context you have related to the question:\n {context}"),
        ("system", "ALWAYS ensure your responses are aligned with the provided context (and personal information)."),
    ]
-)
+    return ChatPromptTemplate.from_messages(prompts)


user_profile_prompt = ChatPromptTemplate.from_messages(
[
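To see what the refactor buys: `get_context_prompt()` with no argument reproduces the old module-level `context_prompt`, while extra instructions are spliced in after the persona messages and before the retrieved-context messages. A minimal sketch of calling the (internal) helper directly, for illustration only:

```python
from clonellm._prompt import get_context_prompt

base = get_context_prompt()  # equivalent to the old `context_prompt`
custom = get_context_prompt(["Keep answers under three sentences."])

# Each additional instruction becomes one extra system message.
assert len(custom.messages) == len(base.messages) + 1
```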
15 changes: 12 additions & 3 deletions src/clonellm/core.py
@@ -26,7 +26,7 @@

from ._base import LiteLLMMixin
from ._prompt import (
-    context_prompt,
+    get_context_prompt,
    history_prompt,
    question_prompt,
    summarize_context_prompt,
@@ -52,6 +52,7 @@ class CloneLLM(LiteLLMMixin):
        user_profile (Optional[UserProfile | dict[str, Any] | str]): The profile of the user to be cloned by the language model. Defaults to None.
        memory (Optional[bool | int]): Maximum number of messages in conversation memory. Defaults to None (or 0) for no memory. -1 or `True` means infinite memory.
        api_key (Optional[str]): The API key to use. Defaults to None.
+        system_prompts (Optional[list[str]]): Additional system prompts (instructions) for the language model. Defaults to None.
        **kwargs (Any): Additional keyword arguments supported by the `langchain_community.chat_models.ChatLiteLLM` class.

"""
@@ -70,13 +71,15 @@ def __init__(
        user_profile: Optional[UserProfile | dict[str, Any] | str] = None,
        memory: Optional[bool | int] = None,
        api_key: Optional[str] = None,
+        system_prompts: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> None:
        self.embedding = embedding
        self.vector_store = vector_store
        self.documents = documents
        self.user_profile = user_profile
        self.memory = memory
+        self.system_prompts = system_prompts

        from_class_method: Optional[dict[str, Any]] = kwargs.pop(self._FROM_CLASS_METHOD_KWARG, None)
        super().__init__(model, api_key, **kwargs)
@@ -134,6 +137,7 @@ def from_persist_directory(
        user_profile: Optional[UserProfile | dict[str, Any] | str] = None,
        memory: Optional[bool | int] = None,
        api_key: Optional[str] = None,
+        system_prompts: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> Self:
        """Creates an instance of CloneLLM by loading a Chroma vector store from a persistent directory.
@@ -145,6 +149,7 @@
            user_profile (Optional[UserProfile | dict[str, Any] | str]): The profile of the user to be cloned by the language model. Defaults to None.
            memory (Optional[bool | int]): Maximum number of messages in conversation memory. Defaults to None (or 0) for no memory. -1 or `True` means infinite memory.
            api_key (Optional[str]): The API key to use. Defaults to None.
+            system_prompts (Optional[list[str]]): Additional system prompts (instructions) for the language model. Defaults to None.
            **kwargs (Any): Additional keyword arguments supported by the `langchain_community.chat_models.ChatLiteLLM` class.

Returns:
@@ -170,6 +175,7 @@
            user_profile=user_profile,
            memory=memory,
            api_key=api_key,
+            system_prompts=system_prompts,
            **kwargs,
        )

@@ -181,6 +187,7 @@ def from_context(
        user_profile: Optional[UserProfile | dict[str, Any] | str] = None,
        memory: Optional[bool | int] = None,
        api_key: Optional[str] = None,
+        system_prompts: Optional[list[str]] = None,
        **kwargs: Any,
    ) -> Self:
        """Creates an instance of CloneLLM using a summarized context string instead of documents.
@@ -191,6 +198,7 @@
            user_profile (Optional[UserProfile | dict[str, Any] | str]): The profile of the user to be cloned by the language model. Defaults to None.
            memory (Optional[bool | int]): Maximum number of messages in conversation memory. Defaults to None (or 0) for no memory. -1 or `True` means infinite memory.
            api_key (Optional[str]): The API key to use. Defaults to None.
+            system_prompts (Optional[list[str]]): Additional system prompts (instructions) for the language model. Defaults to None.
            **kwargs (Any): Additional keyword arguments supported by the `langchain_community.chat_models.ChatLiteLLM` class.

Returns:
@@ -203,6 +211,7 @@
            user_profile=user_profile,
            memory=memory,
            api_key=api_key,
+            system_prompts=system_prompts,
            **kwargs,
        )

@@ -339,7 +348,7 @@ def _get_retriever(self, k: int = 1) -> VectorStoreRetriever:
        return self.db.as_retriever(search_kwargs={"k": k})

    def _get_rag_chain(self) -> RunnableSerializable[Any, str]:
-        prompt = context_prompt.copy()
+        prompt = get_context_prompt(self.system_prompts)
        if self.user_profile:
            prompt += user_profile_prompt.format_messages(user_profile=self._user_profile)
        prompt += question_prompt
@@ -348,7 +357,7 @@ def _get_rag_chain(self) -> RunnableSerializable[Any, str]:
        return {"context": context, "input": RunnablePassthrough()} | prompt | self._llm | StrOutputParser()

    def _get_rag_chain_with_history(self) -> RunnableWithMessageHistory:
-        prompt = context_prompt
+        prompt = get_context_prompt(self.system_prompts)
        if self.user_profile:
            prompt += user_profile_prompt.format_messages(user_profile=self._user_profile)
        prompt += history_prompt
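With `system_prompts` threaded through `__init__`, `from_persist_directory`, and `from_context`, every entry point accepts the new parameter. An end-to-end sketch of the `from_context` path (hedged: the `context=` keyword is inferred from the docstring since the full signature is truncated in this diff, and `invoke` is the query method used elsewhere in the README):

```python
from clonellm import CloneLLM

clone = CloneLLM.from_context(
    model="gpt-4o",
    context="I'm a Python developer who maintains open-source LLM tooling.",  # hypothetical summary
    system_prompts=["Keep your responses brief and concise."],
)
print(clone.invoke("What do you work on?"))
```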