shivXy committed
Commit 46a5c14 · 1 Parent(s): 60ed07e
.gitignore ADDED
@@ -0,0 +1,4 @@
+__pycache__/
+.chainlit/
+.venv/
+.env
Dockerfile ADDED
@@ -0,0 +1,31 @@
+
+# Get a distribution that has uv already installed
+FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim
+
+# Add user - this is the user that will run the app
+# If you do not set user, the app will run as root (undesirable)
+RUN useradd -m -u 1000 user
+USER user
+
+# Set the home directory and path
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+ENV UVICORN_WS_PROTOCOL=websockets
+
+
+# Set the working directory
+WORKDIR $HOME/app
+
+# Copy the app to the container
+COPY --chown=user . $HOME/app
+
+# Install the dependencies
+# RUN uv sync --frozen
+RUN uv sync
+
+# Expose the port
+EXPOSE 7860
+
+# Run the app
+CMD ["uv", "run", "chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
aimakerspace/__init__.py ADDED
File without changes
aimakerspace/openai_utils/__init__.py ADDED
File without changes
aimakerspace/openai_utils/chatmodel.py ADDED
@@ -0,0 +1,45 @@
+from openai import OpenAI, AsyncOpenAI
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+
+class ChatOpenAI:
+    def __init__(self, model_name: str = "gpt-4o-mini"):
+        self.model_name = model_name
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        if self.openai_api_key is None:
+            raise ValueError("OPENAI_API_KEY is not set")
+
+    def run(self, messages, text_only: bool = True, **kwargs):
+        if not isinstance(messages, list):
+            raise ValueError("messages must be a list")
+
+        client = OpenAI()
+        response = client.chat.completions.create(
+            model=self.model_name, messages=messages, **kwargs
+        )
+
+        if text_only:
+            return response.choices[0].message.content
+
+        return response
+
+    async def astream(self, messages, **kwargs):
+        if not isinstance(messages, list):
+            raise ValueError("messages must be a list")
+
+        client = AsyncOpenAI()
+
+        stream = await client.chat.completions.create(
+            model=self.model_name,
+            messages=messages,
+            stream=True,
+            **kwargs
+        )
+
+        async for chunk in stream:
+            content = chunk.choices[0].delta.content
+            if content is not None:
+                yield content
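For reference, a minimal usage sketch of this class (assumes OPENAI_API_KEY is available via the environment or a .env file; the prompts are illustrative):

    import asyncio
    from aimakerspace.openai_utils.chatmodel import ChatOpenAI

    chat = ChatOpenAI(model_name="gpt-4o-mini")

    # Synchronous call; text_only=True (the default) returns a plain string
    print(chat.run([{"role": "user", "content": "Say hello."}]))

    # Streaming call; astream is an async generator yielding content deltas
    async def demo():
        async for token in chat.astream([{"role": "user", "content": "Count to 3."}]):
            print(token, end="", flush=True)

    asyncio.run(demo())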
aimakerspace/openai_utils/embedding.py ADDED
@@ -0,0 +1,59 @@
+from dotenv import load_dotenv
+from openai import AsyncOpenAI, OpenAI
+import openai
+from typing import List
+import os
+import asyncio
+
+
+class EmbeddingModel:
+    def __init__(self, embeddings_model_name: str = "text-embedding-3-small"):
+        load_dotenv()
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        if self.openai_api_key is None:
+            raise ValueError(
+                "OPENAI_API_KEY environment variable is not set. Please set it to your OpenAI API key."
+            )
+        openai.api_key = self.openai_api_key
+
+        self.async_client = AsyncOpenAI()
+        self.client = OpenAI()
+        self.embeddings_model_name = embeddings_model_name
+
+    async def async_get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+        embedding_response = await self.async_client.embeddings.create(
+            input=list_of_text, model=self.embeddings_model_name
+        )
+
+        return [embeddings.embedding for embeddings in embedding_response.data]
+
+    async def async_get_embedding(self, text: str) -> List[float]:
+        embedding = await self.async_client.embeddings.create(
+            input=text, model=self.embeddings_model_name
+        )
+
+        return embedding.data[0].embedding
+
+    def get_embeddings(self, list_of_text: List[str]) -> List[List[float]]:
+        embedding_response = self.client.embeddings.create(
+            input=list_of_text, model=self.embeddings_model_name
+        )
+
+        return [embeddings.embedding for embeddings in embedding_response.data]
+
+    def get_embedding(self, text: str) -> List[float]:
+        embedding = self.client.embeddings.create(
+            input=text, model=self.embeddings_model_name
+        )
+
+        return embedding.data[0].embedding
+
+
+if __name__ == "__main__":
+    embedding_model = EmbeddingModel()
+    print(asyncio.run(embedding_model.async_get_embedding("Hello, world!")))
+    print(
+        asyncio.run(
+            embedding_model.async_get_embeddings(["Hello, world!", "Goodbye, world!"])
+        )
+    )
aimakerspace/openai_utils/prompts.py ADDED
@@ -0,0 +1,78 @@
+import re
+
+
+class BasePrompt:
+    def __init__(self, prompt):
+        """
+        Initializes the BasePrompt object with a prompt template.
+
+        :param prompt: A string that can contain placeholders within curly braces
+        """
+        self.prompt = prompt
+        self._pattern = re.compile(r"\{([^}]+)\}")
+
+    def format_prompt(self, **kwargs):
+        """
+        Formats the prompt string using the keyword arguments provided.
+
+        :param kwargs: The values to substitute into the prompt string
+        :return: The formatted prompt string
+        """
+        matches = self._pattern.findall(self.prompt)
+        return self.prompt.format(**{match: kwargs.get(match, "") for match in matches})
+
+    def get_input_variables(self):
+        """
+        Gets the list of input variable names from the prompt string.
+
+        :return: List of input variable names
+        """
+        return self._pattern.findall(self.prompt)
+
+
+class RolePrompt(BasePrompt):
+    def __init__(self, prompt, role: str):
+        """
+        Initializes the RolePrompt object with a prompt template and a role.
+
+        :param prompt: A string that can contain placeholders within curly braces
+        :param role: The role for the message ('system', 'user', or 'assistant')
+        """
+        super().__init__(prompt)
+        self.role = role
+
+    def create_message(self, format=True, **kwargs):
+        """
+        Creates a message dictionary with a role and a formatted message.
+
+        :param kwargs: The values to substitute into the prompt string
+        :return: Dictionary containing the role and the formatted message
+        """
+        if format:
+            return {"role": self.role, "content": self.format_prompt(**kwargs)}
+
+        return {"role": self.role, "content": self.prompt}
+
+
+class SystemRolePrompt(RolePrompt):
+    def __init__(self, prompt: str):
+        super().__init__(prompt, "system")
+
+
+class UserRolePrompt(RolePrompt):
+    def __init__(self, prompt: str):
+        super().__init__(prompt, "user")
+
+
+class AssistantRolePrompt(RolePrompt):
+    def __init__(self, prompt: str):
+        super().__init__(prompt, "assistant")
+
+
+if __name__ == "__main__":
+    prompt = BasePrompt("Hello {name}, you are {age} years old")
+    print(prompt.format_prompt(name="John", age=30))
+
+    prompt = SystemRolePrompt("Hello {name}, you are {age} years old")
+    print(prompt.create_message(name="John", age=30))
+    print(prompt.get_input_variables())
aimakerspace/text_utils.py ADDED
@@ -0,0 +1,136 @@
+import os
+from typing import List
+import PyPDF2
+
+
+class TextFileLoader:
+    def __init__(self, path: str, encoding: str = "utf-8"):
+        self.documents = []
+        self.path = path
+        self.encoding = encoding
+
+    def load(self):
+        if os.path.isdir(self.path):
+            self.load_directory()
+        elif os.path.isfile(self.path) and self.path.endswith(".txt"):
+            self.load_file()
+        else:
+            raise ValueError(
+                "Provided path is neither a valid directory nor a .txt file."
+            )
+
+    def load_file(self):
+        with open(self.path, "r", encoding=self.encoding) as f:
+            self.documents.append(f.read())
+
+    def load_directory(self):
+        for root, _, files in os.walk(self.path):
+            for file in files:
+                if file.endswith(".txt"):
+                    with open(
+                        os.path.join(root, file), "r", encoding=self.encoding
+                    ) as f:
+                        self.documents.append(f.read())
+
+    def load_documents(self):
+        self.load()
+        return self.documents
+
+
+class CharacterTextSplitter:
+    def __init__(
+        self,
+        chunk_size: int = 1000,
+        chunk_overlap: int = 200,
+    ):
+        assert (
+            chunk_size > chunk_overlap
+        ), "Chunk size must be greater than chunk overlap"
+
+        self.chunk_size = chunk_size
+        self.chunk_overlap = chunk_overlap
+
+    def split(self, text: str) -> List[str]:
+        chunks = []
+        for i in range(0, len(text), self.chunk_size - self.chunk_overlap):
+            chunks.append(text[i : i + self.chunk_size])
+        return chunks
+
+    def split_texts(self, texts: List[str]) -> List[str]:
+        chunks = []
+        for text in texts:
+            chunks.extend(self.split(text))
+        return chunks
+
+
+class PDFLoader:
+    def __init__(self, path: str):
+        self.documents = []
+        self.path = path
+        print(f"PDFLoader initialized with path: {self.path}")
+
+    def load(self):
+        print(f"Loading PDF from path: {self.path}")
+        print(f"Path exists: {os.path.exists(self.path)}")
+        print(f"Is file: {os.path.isfile(self.path)}")
+        print(f"Is directory: {os.path.isdir(self.path)}")
+        print(f"File permissions: {oct(os.stat(self.path).st_mode)[-3:]}")
+
+        try:
+            # Try to open the file first to verify access
+            with open(self.path, 'rb') as test_file:
+                pass
+
+            # If we can open it, proceed with loading
+            self.load_file()
+
+        except IOError as e:
+            raise ValueError(f"Cannot access file at '{self.path}': {str(e)}")
+        except Exception as e:
+            raise ValueError(f"Error processing file at '{self.path}': {str(e)}")
+
+    def load_file(self):
+        with open(self.path, 'rb') as file:
+            # Create PDF reader object
+            pdf_reader = PyPDF2.PdfReader(file)
+
+            # Extract text from each page
+            text = ""
+            for page in pdf_reader.pages:
+                text += page.extract_text() + "\n"
+
+            self.documents.append(text)
+
+    def load_directory(self):
+        for root, _, files in os.walk(self.path):
+            for file in files:
+                if file.lower().endswith('.pdf'):
+                    file_path = os.path.join(root, file)
+                    with open(file_path, 'rb') as f:
+                        pdf_reader = PyPDF2.PdfReader(f)
+
+                        # Extract text from each page
+                        text = ""
+                        for page in pdf_reader.pages:
+                            text += page.extract_text() + "\n"
+
+                        self.documents.append(text)
+
+    def load_documents(self):
+        self.load()
+        return self.documents
+
+
+if __name__ == "__main__":
+    loader = TextFileLoader("data/KingLear.txt")
+    loader.load()
+    splitter = CharacterTextSplitter()
+    chunks = splitter.split_texts(loader.documents)
+    print(len(chunks))
+    print(chunks[0])
+    print("--------")
+    print(chunks[1])
+    print("--------")
+    print(chunks[-2])
+    print("--------")
+    print(chunks[-1])
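The splitter steps through the text in strides of chunk_size - chunk_overlap characters, so consecutive chunks share chunk_overlap characters of context. A small sketch with illustrative values:

    from aimakerspace.text_utils import CharacterTextSplitter

    splitter = CharacterTextSplitter(chunk_size=10, chunk_overlap=4)
    # stride = 10 - 4 = 6, so chunks start at offsets 0, 6, 12, ...
    print(splitter.split("abcdefghijklmnop"))
    # ['abcdefghij', 'ghijklmnop', 'mnop']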
aimakerspace/vectordatabase.py ADDED
@@ -0,0 +1,81 @@
+import numpy as np
+from collections import defaultdict
+from typing import List, Tuple, Callable
+from aimakerspace.openai_utils.embedding import EmbeddingModel
+import asyncio
+
+
+def cosine_similarity(vector_a: np.array, vector_b: np.array) -> float:
+    """Computes the cosine similarity between two vectors."""
+    dot_product = np.dot(vector_a, vector_b)
+    norm_a = np.linalg.norm(vector_a)
+    norm_b = np.linalg.norm(vector_b)
+    return dot_product / (norm_a * norm_b)
+
+
+class VectorDatabase:
+    def __init__(self, embedding_model: EmbeddingModel = None):
+        self.vectors = defaultdict(np.array)
+        self.embedding_model = embedding_model or EmbeddingModel()
+
+    def insert(self, key: str, vector: np.array) -> None:
+        self.vectors[key] = vector
+
+    def search(
+        self,
+        query_vector: np.array,
+        k: int,
+        distance_measure: Callable = cosine_similarity,
+    ) -> List[Tuple[str, float]]:
+        scores = [
+            (key, distance_measure(query_vector, vector))
+            for key, vector in self.vectors.items()
+        ]
+        return sorted(scores, key=lambda x: x[1], reverse=True)[:k]
+
+    def search_by_text(
+        self,
+        query_text: str,
+        k: int,
+        distance_measure: Callable = cosine_similarity,
+        return_as_text: bool = False,
+    ) -> List[Tuple[str, float]]:
+        query_vector = self.embedding_model.get_embedding(query_text)
+        results = self.search(query_vector, k, distance_measure)
+        return [result[0] for result in results] if return_as_text else results
+
+    def retrieve_from_key(self, key: str) -> np.array:
+        return self.vectors.get(key, None)
+
+    async def abuild_from_list(self, list_of_text: List[str]) -> "VectorDatabase":
+        embeddings = await self.embedding_model.async_get_embeddings(list_of_text)
+        for text, embedding in zip(list_of_text, embeddings):
+            self.insert(text, np.array(embedding))
+        return self
+
+
+if __name__ == "__main__":
+    list_of_text = [
+        "I like to eat broccoli and bananas.",
+        "I ate a banana and spinach smoothie for breakfast.",
+        "Chinchillas and kittens are cute.",
+        "My sister adopted a kitten yesterday.",
+        "Look at this cute hamster munching on a piece of broccoli.",
+    ]
+
+    vector_db = VectorDatabase()
+    vector_db = asyncio.run(vector_db.abuild_from_list(list_of_text))
+    k = 2
+
+    searched_vector = vector_db.search_by_text("I think fruit is awesome!", k=k)
+    print(f"Closest {k} vector(s):", searched_vector)
+
+    retrieved_vector = vector_db.retrieve_from_key(
+        "I like to eat broccoli and bananas."
+    )
+    print("Retrieved vector:", retrieved_vector)
+
+    relevant_texts = vector_db.search_by_text(
+        "I think fruit is awesome!", k=k, return_as_text=True
+    )
+    print(f"Closest {k} text(s):", relevant_texts)
app.py ADDED
@@ -0,0 +1,139 @@
+import os
+from typing import List
+from chainlit.types import AskFileResponse
+from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader, PDFLoader
+from aimakerspace.openai_utils.prompts import (
+    UserRolePrompt,
+    SystemRolePrompt,
+    AssistantRolePrompt,
+)
+from aimakerspace.openai_utils.embedding import EmbeddingModel
+from aimakerspace.vectordatabase import VectorDatabase
+from aimakerspace.openai_utils.chatmodel import ChatOpenAI
+import chainlit as cl
+
+system_template = """\
+Use the following context to answer a user's question. If you cannot find the answer in the context, say you don't know the answer."""
+system_role_prompt = SystemRolePrompt(system_template)
+
+user_prompt_template = """\
+Context:
+{context}
+
+Question:
+{question}
+"""
+user_role_prompt = UserRolePrompt(user_prompt_template)
+
+class RetrievalAugmentedQAPipeline:
+    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
+        self.llm = llm
+        self.vector_db_retriever = vector_db_retriever
+
+    async def arun_pipeline(self, user_query: str):
+        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)
+
+        context_prompt = ""
+        for context in context_list:
+            context_prompt += context[0] + "\n"
+
+        formatted_system_prompt = system_role_prompt.create_message()
+
+        formatted_user_prompt = user_role_prompt.create_message(question=user_query, context=context_prompt)
+
+        async def generate_response():
+            async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
+                yield chunk
+
+        return {"response": generate_response(), "context": context_list}
+
+text_splitter = CharacterTextSplitter()
+
+
+def process_file(file: AskFileResponse):
+    import tempfile
+    import shutil
+
+    print(f"Processing file: {file.name}")
+
+    # Create a temporary file with the correct extension
+    suffix = f".{file.name.split('.')[-1]}"
+    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
+        # Copy the uploaded file content to the temporary file
+        shutil.copyfile(file.path, temp_file.name)
+        print(f"Created temporary file at: {temp_file.name}")
+
+    # Create appropriate loader
+    if file.name.lower().endswith('.pdf'):
+        loader = PDFLoader(temp_file.name)
+    else:
+        loader = TextFileLoader(temp_file.name)
+
+    try:
+        # Load and process the documents
+        documents = loader.load_documents()
+        texts = text_splitter.split_texts(documents)
+        return texts
+    finally:
+        # Clean up the temporary file
+        try:
+            os.unlink(temp_file.name)
+        except Exception as e:
+            print(f"Error cleaning up temporary file: {e}")
+
+
+@cl.on_chat_start
+async def on_chat_start():
+    files = None
+
+    # Wait for the user to upload a file
+    while files is None:
+        files = await cl.AskFileMessage(
+            content="Please upload a Text or PDF file to begin!",
+            accept=["text/plain", "application/pdf"],
+            max_size_mb=2,
+            timeout=180,
+        ).send()
+
+    file = files[0]
+
+    msg = cl.Message(
+        content=f"Processing `{file.name}`..."
+    )
+    await msg.send()
+
+    # load the file
+    texts = process_file(file)
+
+    print(f"Processing {len(texts)} text chunks")
+
+    # Create a dict vector store
+    vector_db = VectorDatabase()
+    vector_db = await vector_db.abuild_from_list(texts)
+
+    chat_openai = ChatOpenAI()
+
+    # Create a chain
+    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
+        vector_db_retriever=vector_db,
+        llm=chat_openai
+    )
+
+    # Let the user know that the system is ready
+    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
+    await msg.update()
+
+    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)
+
+
+@cl.on_message
+async def main(message):
+    chain = cl.user_session.get("chain")
+
+    msg = cl.Message(content="")
+    result = await chain.arun_pipeline(message.content)
+
+    async for stream_resp in result["response"]:
+        await msg.stream_token(stream_resp)
+
+    await msg.send()
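For local development, the app can be started the same way the container does, e.g. `uv run chainlit run app.py` (adding `-w` enables auto-reload while editing; without `--port`, Chainlit defaults to port 8000 rather than the 7860 used in the Dockerfile).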
pyproject.toml ADDED
@@ -0,0 +1,14 @@
+[project]
+name = "aie5-deploypythonicrag"
+version = "0.1.0"
+description = "Simple Pythonic RAG App"
+readme = "README.md"
+requires-python = ">=3.13"
+dependencies = [
+    "chainlit>=2.0.4",
+    "numpy>=2.2.2",
+    "openai>=1.59.9",
+    "pydantic==2.10.1",
+    "pypdf2>=3.0.1",
+    "websockets>=14.2",
+]
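These are the dependencies that `RUN uv sync` in the Dockerfile resolves and installs: pypdf2 backs the PDFLoader in aimakerspace/text_utils.py, and websockets supports the UVICORN_WS_PROTOCOL=websockets setting used by the container.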