code | apis | extract_api
---|---|---|
#!/usr/bin/env python3
from dataclasses import dataclass, field
from typing import cast
from loguru import logger
from llama_index.core import Document, VectorStoreIndex, Settings
from llama_index.core.query_engine import CitationQueryEngine
import nest_asyncio
from uglychain import Model, Retriever, StorageRetriever
from uglychain.storage import Storage, SQLiteStorage
from uglychain.llm.llama_index import LlamaIndexLLM
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
nest_asyncio.apply()
Settings.llm = LlamaIndexLLM(model=Model.GPT3_TURBO)
@dataclass
class GithubIndex:
filename: str = "data/github/github.db"
model: Model = Model.DEFAULT
summarizer_db: Storage = field(init=False)
retriever: StorageRetriever = field(init=False)
def __post_init__(self):
self.summarizer_db = SQLiteStorage(self.filename, "ReadmeSummarizer", 30)
self.retriever = Retriever.LlamaIndex.getStorage(
persist_dir="./data/github/repos"
)
if self._need_update:
self._update()
def search(self, query: str):
index = cast(VectorStoreIndex, self.retriever.index) # type: ignore
query_engine = CitationQueryEngine.from_args(index, similarity_top_k=5)
# query_engine = index.as_query_engine(llm=LlamaIndexLLM(Model.GPT3_TURBO), similarity_top_k=8) # type: ignore
return query_engine.query(query)
# self.retriever.get(query, "refine")
@property
def _need_update(self):
return False
def _update(self):
doc_chunks = []
data = self.summarizer_db.load(condition="timestamp = date('now','localtime')")
for key, value in data.items():
doc = Document(text=value, doc_id=key)
doc_chunks.append(doc)
index = cast(VectorStoreIndex, self.retriever.index) # type: ignore
logger.info("refresh_ref_docs")
index.refresh_ref_docs(doc_chunks)
self.retriever.storage.save(index)
logger.info("refresh_ref_docs done")
if __name__ == "__main__":
index = GithubIndex()
result = index.search("给我介绍几个关于使用大模型自动写代码的项目吧!")
# logger.debug(result.source_nodes)
logger.info(result)
| [
"llama_index.core.query_engine.CitationQueryEngine.from_args",
"llama_index.core.Document"
] | [((454, 512), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (473, 512), False, 'import logging\n'), ((587, 607), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (605, 607), False, 'import nest_asyncio\n'), ((623, 660), 'uglychain.llm.llama_index.LlamaIndexLLM', 'LlamaIndexLLM', ([], {'model': 'Model.GPT3_TURBO'}), '(model=Model.GPT3_TURBO)\n', (636, 660), False, 'from uglychain.llm.llama_index import LlamaIndexLLM\n'), ((544, 584), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (565, 584), False, 'import logging\n'), ((799, 816), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (804, 816), False, 'from dataclasses import dataclass, field\n'), ((851, 868), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (856, 868), False, 'from dataclasses import dataclass, field\n'), ((2276, 2295), 'loguru.logger.info', 'logger.info', (['result'], {}), '(result)\n', (2287, 2295), False, 'from loguru import logger\n'), ((513, 532), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (530, 532), False, 'import logging\n'), ((928, 980), 'uglychain.storage.SQLiteStorage', 'SQLiteStorage', (['self.filename', '"""ReadmeSummarizer"""', '(30)'], {}), "(self.filename, 'ReadmeSummarizer', 30)\n", (941, 980), False, 'from uglychain.storage import Storage, SQLiteStorage\n'), ((1006, 1072), 'uglychain.Retriever.LlamaIndex.getStorage', 'Retriever.LlamaIndex.getStorage', ([], {'persist_dir': '"""./data/github/repos"""'}), "(persist_dir='./data/github/repos')\n", (1037, 1072), False, 'from uglychain import Model, Retriever, StorageRetriever\n'), ((1203, 1247), 'typing.cast', 'cast', (['VectorStoreIndex', 'self.retriever.index'], {}), '(VectorStoreIndex, self.retriever.index)\n', (1207, 1247), False, 'from typing import cast\n'), ((1287, 1343), 'llama_index.core.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(5)'}), '(index, similarity_top_k=5)\n', (1316, 1343), False, 'from llama_index.core.query_engine import CitationQueryEngine\n'), ((1892, 1936), 'typing.cast', 'cast', (['VectorStoreIndex', 'self.retriever.index'], {}), '(VectorStoreIndex, self.retriever.index)\n', (1896, 1936), False, 'from typing import cast\n'), ((1961, 1992), 'loguru.logger.info', 'logger.info', (['"""refresh_ref_docs"""'], {}), "('refresh_ref_docs')\n", (1972, 1992), False, 'from loguru import logger\n'), ((2087, 2123), 'loguru.logger.info', 'logger.info', (['"""refresh_ref_docs done"""'], {}), "('refresh_ref_docs done')\n", (2098, 2123), False, 'from loguru import logger\n'), ((1808, 1840), 'llama_index.core.Document', 'Document', ([], {'text': 'value', 'doc_id': 'key'}), '(text=value, doc_id=key)\n', (1816, 1840), False, 'from llama_index.core import Document, VectorStoreIndex, Settings\n')] |
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.extractors.entity import EntityExtractor
reader = SimpleDirectoryReader('files')
documents = reader.load_data()
parser = SentenceSplitter(include_prev_next_rel=True)
nodes = parser.get_nodes_from_documents(documents)
entity_extractor = EntityExtractor(
label_entities = True,
device = "cpu"
)
metadata_list = entity_extractor.extract(nodes)
print(metadata_list)
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.extractors.entity.EntityExtractor"
] | [((177, 207), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (198, 207), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((248, 292), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (264, 292), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((364, 414), 'llama_index.extractors.entity.EntityExtractor', 'EntityExtractor', ([], {'label_entities': '(True)', 'device': '"""cpu"""'}), "(label_entities=True, device='cpu')\n", (379, 414), False, 'from llama_index.extractors.entity import EntityExtractor\n')] |
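A small follow-up sketch for the entity-extraction row above: extract() returns one metadata dict per node, so the results can be merged back onto the nodes before indexing. It assumes nodes and metadata_list are the index-aligned objects from the snippet.
for node, metadata in zip(nodes, metadata_list):
    # merge whatever entity fields the extractor produced onto the node's metadata
    node.metadata.update(metadata)
    print(node.metadata)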
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.llms.llama_cpp.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
from llama_index.llms.openai import OpenAI
from core.manager import settings
MODEL = "openai"
# LLM selection
if MODEL == "openai":
print("USE OPENAI")
# Use OpenAI model
system_prompt = """If the user is greeting then respond by saying, "Hello, how may we help you ?"
"""
llm = OpenAI(model="gpt-4-turbo-preview", api_key=settings.OPENAI_KEY, system_prompt=system_prompt)
else: # Default to Llama
print("USE LLAMA")
# model_url: str = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf"
model_path: str = "core/models/llama-2-13b-chat.Q2_K.gguf"
# TODO: Save the model automatically the first time
# Check if model is already downloaded
# if not os.path.exists(model_path):
# print("Model not found. Downloading...")
# response = requests.get(model_url)
# with open(model_path, "wb") as f:
# f.write(response.content)
# print("Model downloaded and saved.")
# else:
# print("Model found.")
llm = LlamaCPP(
# model_url=model_url,
model_path=model_path,
temperature=0.1,
max_new_tokens=256,
context_window=3900,
model_kwargs={"n_gpu_layers": 2}, # set GPU layers to 1 if you have one
verbose=True,
messages_to_prompt=messages_to_prompt, # providing additional parameters
completion_to_prompt=completion_to_prompt, # providing additional parameters
)
if __name__ == "__main__":
print("LLM")
| [
"llama_index.llms.openai.OpenAI",
"llama_index.llms.llama_cpp.LlamaCPP"
] | [((470, 567), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""', 'api_key': 'settings.OPENAI_KEY', 'system_prompt': 'system_prompt'}), "(model='gpt-4-turbo-preview', api_key=settings.OPENAI_KEY,\n system_prompt=system_prompt)\n", (476, 567), False, 'from llama_index.llms.openai import OpenAI\n'), ((1219, 1451), 'llama_index.llms.llama_cpp.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(256)', 'context_window': '(3900)', 'model_kwargs': "{'n_gpu_layers': 2}", 'verbose': '(True)', 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt'}), "(model_path=model_path, temperature=0.1, max_new_tokens=256,\n context_window=3900, model_kwargs={'n_gpu_layers': 2}, verbose=True,\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt)\n", (1227, 1451), False, 'from llama_index.llms.llama_cpp import LlamaCPP\n')] |
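For the TODO in the row above, a hedged sketch of downloading the GGUF file on first run, streaming it to disk instead of buffering the whole response in memory (the model_url is the one commented out in the snippet):
import os
import requests

def ensure_model(model_path: str, model_url: str) -> None:
    # Fetch the model once; later runs reuse the file already on disk.
    if os.path.exists(model_path):
        print("Model found.")
        return
    print("Model not found. Downloading...")
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    with requests.get(model_url, stream=True, timeout=60) as response:
        response.raise_for_status()
        with open(model_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=1 << 20):
                f.write(chunk)
    print("Model downloaded and saved.")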
from llama_index import SimpleDirectoryReader,VectorStoreIndex , load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from dotenv import load_dotenv
import logging
import sys
load_dotenv()
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
async def load_index(directory_path : str = r'data'):
documents = SimpleDirectoryReader(directory_path, filename_as_id=True).load_data()
print(f"loaded documents with {len(documents)} pages")
try:
# Rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# Try to load the index from storage
index = load_index_from_storage(storage_context)
logging.info("Index loaded from storage.")
except FileNotFoundError:
logging.info("Index not found. Creating a new one...")
index = VectorStoreIndex.from_documents(documents)
# Persist index to disk
index.storage_context.persist()
logging.info("New index created and persisted to storage.")
return index
async def update_index(directory_path : str = r'data'):
try:
documents = SimpleDirectoryReader(directory_path, filename_as_id=True).load_data()
except FileNotFoundError:
logging.error("Invalid document directory path.")
return None
try:
# Rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# Try to load the index from storage
index = load_index_from_storage(storage_context)
logging.info("Existing index loaded from storage.")
refreshed_docs = index.refresh_ref_docs(documents, update_kwargs={"delete_kwargs": {"delete_from_docstore": True}})
# index.update_ref_doc()
print(refreshed_docs)
print('Number of newly inserted/refreshed docs: ', sum(refreshed_docs))
index.storage_context.persist()
logging.info("Index refreshed and persisted to storage.")
return refreshed_docs
except FileNotFoundError:
# Run refresh_ref_docs function to check for document updates
logging.error("Index is not created yet.")
return None
| [
"llama_index.SimpleDirectoryReader",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage"
] | [((212, 225), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (223, 225), False, 'from dotenv import load_dotenv\n'), ((255, 313), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (274, 313), False, 'import logging\n'), ((345, 385), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (366, 385), False, 'import logging\n'), ((314, 333), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (331, 333), False, 'import logging\n'), ((658, 711), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (686, 711), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((773, 813), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (796, 813), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((822, 864), 'logging.info', 'logging.info', (['"""Index loaded from storage."""'], {}), "('Index loaded from storage.')\n", (834, 864), False, 'import logging\n'), ((1510, 1563), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1538, 1563), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1625, 1665), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1648, 1665), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((1674, 1725), 'logging.info', 'logging.info', (['"""Existing index loaded from storage."""'], {}), "('Existing index loaded from storage.')\n", (1686, 1725), False, 'import logging\n'), ((2042, 2099), 'logging.info', 'logging.info', (['"""Index refreshed and persisted to storage."""'], {}), "('Index refreshed and persisted to storage.')\n", (2054, 2099), False, 'import logging\n'), ((459, 517), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {'filename_as_id': '(True)'}), '(directory_path, filename_as_id=True)\n', (480, 517), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((904, 958), 'logging.info', 'logging.info', (['"""Index not found. Creating a new one..."""'], {}), "('Index not found. 
Creating a new one...')\n", (916, 958), False, 'import logging\n'), ((975, 1017), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1006, 1017), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((1098, 1157), 'logging.info', 'logging.info', (['"""New index created and persisted to storage."""'], {}), "('New index created and persisted to storage.')\n", (1110, 1157), False, 'import logging\n'), ((1371, 1420), 'logging.error', 'logging.error', (['"""Invalid document directory path."""'], {}), "('Invalid document directory path.')\n", (1384, 1420), False, 'import logging\n'), ((2244, 2286), 'logging.error', 'logging.error', (['"""Index is not created yet."""'], {}), "('Index is not created yet.')\n", (2257, 2286), False, 'import logging\n'), ((1262, 1320), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {'filename_as_id': '(True)'}), '(directory_path, filename_as_id=True)\n', (1283, 1320), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n')] |
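A minimal driver for the two coroutines in the row above, assuming an OpenAI key is configured for the default ServiceContext and that the "data" directory exists; the query string is illustrative only:
import asyncio

async def main() -> None:
    # Build (or load) the index once, then refresh it with any changed documents.
    index = await load_index("data")
    refreshed = await update_index("data")
    if refreshed is not None:
        print(f"{sum(refreshed)} document(s) newly inserted or refreshed")
    query_engine = index.as_query_engine()
    print(query_engine.query("What are these documents about?"))

if __name__ == "__main__":
    asyncio.run(main())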
import asyncio
from typing import Any, Optional, Sequence
from llama_index.evaluation import CorrectnessEvaluator, EvaluationResult
from wandbot.evaluation.eval.utils import (
make_eval_template,
safe_parse_eval_response,
)
SYSTEM_TEMPLATE = """You are a Weight & Biases support expert tasked with evaluating the relevancy of answers to questions asked by users to a technical support chatbot.
You are given the following information:
- a user query,
- a reference answer
- a generated answer.
Your job is to judge the relevance of the generated answer to the user query.
- Consider whether the answer addresses all aspects of the question, aligns with the user's intent, and provides an appropriate and on-topic response.
- Measure the generated answer on its sensibleness, meaning it needs to make sense in context and be specific i.e. it is comprehensive without being too vague.
- Compare the generated answer to the reference answer for its relevancy, sensibleness and specificity.
- Output a score and a decision that represents a holistic evaluation of the generated answer.
- You must return your response only in the below mentioned format. Do not return answers in any other format.
Follow these guidelines for scoring:
- Your score has to be between 1 and 3, where 1 is the worst and 3 is the best.
- If the generated answer is not relevant to the user query, you should give a score of 1.
- If the generated answer is relevant but contains mistakes or lacks specificity, you should give a score of 2.
- If the generated answer is relevant and comprehensive, you should give a score of 3.
Output your final verdict by strictly following JSON format:
{{
"reason": <<Provide a brief explanation for your decision here>>,
"score": <<Provide a score as per the above guidelines>>,
"decision": <<Provide your final decision here, either 'relevant', or 'irrelevant'>>
}}
Example Response 1:
{{
"reason": "The generated answer is relevant and provides a similar level of detail as the reference answer. It also provides information that is relevant to the user's query.",
"score": 3,
"decision": "relevant"
}}
Example Response 2:
{{
"reason": "The generated answer deviates significantly from the reference answer, and is not directly answering the user's query",
"score": 1,
"decision": "irrelevant"
}}
Example Response 3:
{{
"reason": "The generated answer is relevant and provides a similar level of detail as the reference answer. However, it introduces variations in the code example that are not mentioned in the documentation. This could potentially confuse users if the method is not part of the documented API.
"score": 2,
"decision": "irrelevant"
}}
"""
USER_TEMPLATE = """
## User Query
{query}
## Reference Answer
{reference_answer}
## Generated Answer
{generated_answer}
"""
RELEVANCY_EVAL_TEMPLATE = make_eval_template(SYSTEM_TEMPLATE, USER_TEMPLATE)
class WandbRelevancyEvaluator(CorrectnessEvaluator):
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference: Optional[str] = None,
sleep_time_in_seconds: int = 0,
**kwargs: Any,
) -> EvaluationResult:
await asyncio.sleep(sleep_time_in_seconds)
if query is None or response is None or reference is None:
print(query, response, reference, flush=True)
raise ValueError("query, response, and reference must be provided")
eval_response = await self._service_context.llm.apredict(
prompt=self._eval_template,
query=query,
generated_answer=response,
reference_answer=reference,
)
passing, reasoning, score = safe_parse_eval_response(
eval_response, "relevant"
)
return EvaluationResult(
query=query,
response=response,
passing=passing,
score=score,
feedback=reasoning,
)
| [
"llama_index.evaluation.EvaluationResult"
] | [((2886, 2936), 'wandbot.evaluation.eval.utils.make_eval_template', 'make_eval_template', (['SYSTEM_TEMPLATE', 'USER_TEMPLATE'], {}), '(SYSTEM_TEMPLATE, USER_TEMPLATE)\n', (2904, 2936), False, 'from wandbot.evaluation.eval.utils import make_eval_template, safe_parse_eval_response\n'), ((3804, 3855), 'wandbot.evaluation.eval.utils.safe_parse_eval_response', 'safe_parse_eval_response', (['eval_response', '"""relevant"""'], {}), "(eval_response, 'relevant')\n", (3828, 3855), False, 'from wandbot.evaluation.eval.utils import make_eval_template, safe_parse_eval_response\n'), ((3894, 3997), 'llama_index.evaluation.EvaluationResult', 'EvaluationResult', ([], {'query': 'query', 'response': 'response', 'passing': 'passing', 'score': 'score', 'feedback': 'reasoning'}), '(query=query, response=response, passing=passing, score=\n score, feedback=reasoning)\n', (3910, 3997), False, 'from llama_index.evaluation import CorrectnessEvaluator, EvaluationResult\n'), ((3303, 3339), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (3316, 3339), False, 'import asyncio\n')] |
from typing import List
from fastapi import APIRouter, Depends, HTTPException, status
from llama_index.chat_engine.types import BaseChatEngine
from llama_index.llms.base import ChatMessage
from llama_index.llms.types import MessageRole
from pydantic import BaseModel
from app.engine.index import get_chat_engine
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
context: List[str] | None = None
class _ChatData(BaseModel):
messages: List[_Message]
class _Result(BaseModel):
result: _Message
@r.post("")
async def chat(
data: _ChatData,
chat_engine: BaseChatEngine = Depends(get_chat_engine),
) -> _Result:
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
response = await chat_engine.achat(lastMessage.content, messages)
return _Result(
result=_Message(
role=MessageRole.ASSISTANT,
content=response.response,
context=[x.text for x in response.source_nodes]
)
)
| [
"llama_index.llms.base.ChatMessage"
] | [((332, 343), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (341, 343), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((647, 671), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (654, 671), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((780, 870), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (793, 870), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((998, 1098), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (1011, 1098), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((1221, 1264), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1232, 1264), False, 'from llama_index.llms.base import ChatMessage\n')] |
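A hypothetical client call for the route above; the host and the "/api/chat" prefix are assumptions that depend on how chat_router is mounted in the FastAPI application:
import requests

payload = {
    "messages": [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello, how may we help you ?"},
        {"role": "user", "content": "What does the knowledge base cover?"},
    ]
}
resp = requests.post("http://localhost:8000/api/chat", json=payload, timeout=60)
resp.raise_for_status()
result = resp.json()["result"]
print(result["role"], result["content"], result.get("context"))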
from pymongo import MongoClient
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.docstore.mongodb.base import MongoDBKVStore
import os
import streamlit as st
DEFAULT_DB_NAME: str = "aegis-athena-data"
@st.cache_resource
def get_client() -> MongoClient:
return MongoClient(
host=os.getenv("MONGODB_HOST"),
port=int(os.getenv("MONGODB_PORT")),
)
def as_docstore(client: MongoClient, db_name: str = DEFAULT_DB_NAME) -> MongoDocumentStore:
return MongoDocumentStore(
mongo_kvstore=MongoDBKVStore(
mongo_client=client,
db_name=db_name,
),
)
| [
"llama_index.storage.docstore.mongodb.base.MongoDBKVStore"
] | [((338, 363), 'os.getenv', 'os.getenv', (['"""MONGODB_HOST"""'], {}), "('MONGODB_HOST')\n", (347, 363), False, 'import os\n'), ((563, 615), 'llama_index.storage.docstore.mongodb.base.MongoDBKVStore', 'MongoDBKVStore', ([], {'mongo_client': 'client', 'db_name': 'db_name'}), '(mongo_client=client, db_name=db_name)\n', (577, 615), False, 'from llama_index.storage.docstore.mongodb.base import MongoDBKVStore\n'), ((382, 407), 'os.getenv', 'os.getenv', (['"""MONGODB_PORT"""'], {}), "('MONGODB_PORT')\n", (391, 407), False, 'import os\n')] |
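A short usage sketch for the two helpers above; it requires MONGODB_HOST and MONGODB_PORT to be set, and st.cache_resource simply executes the function when called outside a Streamlit session:
client = get_client()
docstore = as_docstore(client)  # uses the default "aegis-athena-data" database
print(f"{len(docstore.docs)} document(s) currently stored")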
import os
from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
import openai
import logging
import sys
#llamaindex logs
logging.basicConfig(stream=sys.stdout, level=logging.INFO) # logging.DEBUG for more verbose output
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#Enable to show openai logs
#openai.log='debug'
#Based on your settings, see version, base, key in your Azure AI portal
api_type = "azure"
api_version = "2023-03-15-preview"
api_base = os.getenv("AZURE_API_BASE")
api_key = os.getenv("AZURE_API_KEY")
chat_deployment = "gpt35"
embedding_deployment= "text-embedding-ada-002"
# Chat model
llm = AzureOpenAI(deployment_name=chat_deployment, openai_api_base=api_base, openai_api_key=api_key, model_kwargs={
"api_type": api_type,
"api_version": api_version,
})
llm_predictor = LLMPredictor(llm=llm)
# Embedding model
embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model=embedding_deployment,
deployment=embedding_deployment,
openai_api_key=api_key,
openai_api_base=api_base,
openai_api_type=api_type,
openai_api_version=api_version,
),
embed_batch_size=1
)
#load docs
documents = SimpleDirectoryReader('local-data').load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embedding_llm)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir="local-index-azure")
print("Saved embeddings")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor"
] | [((271, 329), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (290, 329), False, 'import logging\n'), ((630, 657), 'os.getenv', 'os.getenv', (['"""AZURE_API_BASE"""'], {}), "('AZURE_API_BASE')\n", (639, 657), False, 'import os\n'), ((668, 694), 'os.getenv', 'os.getenv', (['"""AZURE_API_KEY"""'], {}), "('AZURE_API_KEY')\n", (677, 694), False, 'import os\n'), ((788, 955), 'langchain.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'chat_deployment', 'openai_api_base': 'api_base', 'openai_api_key': 'api_key', 'model_kwargs': "{'api_type': api_type, 'api_version': api_version}"}), "(deployment_name=chat_deployment, openai_api_base=api_base,\n openai_api_key=api_key, model_kwargs={'api_type': api_type,\n 'api_version': api_version})\n", (799, 955), False, 'from langchain.llms import AzureOpenAI\n'), ((975, 996), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (987, 996), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((1414, 1503), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_llm'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_llm)\n', (1442, 1503), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((1508, 1583), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1539, 1583), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((401, 441), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (422, 441), False, 'import logging\n'), ((1056, 1250), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'embedding_deployment', 'deployment': 'embedding_deployment', 'openai_api_key': 'api_key', 'openai_api_base': 'api_base', 'openai_api_type': 'api_type', 'openai_api_version': 'api_version'}), '(model=embedding_deployment, deployment=\n embedding_deployment, openai_api_key=api_key, openai_api_base=api_base,\n openai_api_type=api_type, openai_api_version=api_version)\n', (1072, 1250), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((370, 389), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (387, 389), False, 'import logging\n'), ((1347, 1382), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""local-data"""'], {}), "('local-data')\n", (1368, 1382), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n')] |
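To query the persisted index later without re-embedding, it can be reloaded from "local-index-azure"; a sketch, assuming the same service_context built above so the Azure chat and embedding deployments are reused at query time:
from llama_index import StorageContext, load_index_from_storage

storage_context = StorageContext.from_defaults(persist_dir="local-index-azure")
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine()
print(query_engine.query("Summarize the loaded documents."))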
import time
import os
import streamlit as st
import openai
import logging
import sys
import llama_index
from qdrant_client import QdrantClient
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index import set_global_service_context
from llama_index.embeddings import VoyageEmbedding
from qdrant_client.models import Distance, VectorParams
version = "1.0.2"
st.set_page_config(page_title=f"Courier v{version}", page_icon="🌎", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title(f"Courier v{version}")
# Set up logging and tracing via Arize Phoenix
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Use Voyager Lite Embeddings
model_name = "voyage-lite-01-instruct"
voyage_api_key = os.environ.get("VOYAGE_API_KEY", "")
embed_model = VoyageEmbedding(
model_name=model_name, voyage_api_key=voyage_api_key
)
# Update Custom QA Template with Gaia Information
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Hello, my name is Courier. I'm an Generative AI Assistant designed to help Proctor Academy students. Ask me anything about the Proctor Handbook or any current Proctor Academy staff."}
]
openai.api_key = ""
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text=f"Loading Courier v{version} ..."):
docs = SimpleDirectoryReader(input_dir="./data", recursive=True).load_data()
qdrant_client = QdrantClient(
url="https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io",
api_key="",
)
qdrant_client.create_collection(collection_name="courierv52",vectors_config=VectorParams(size=1024, distance=Distance.EUCLID),)
service_context = ServiceContext.from_defaults(embed_model=embed_model,llm=OpenAI(model="gpt-4", max_tokens=1500, temperature=0.5, system_prompt="Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. Always attempt to query database."))
set_global_service_context(service_context)
vector_store = QdrantVectorStore(client=qdrant_client, collection_name="courierv52")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
docs, storage_context=storage_context, service_context=service_context,
)
return index
index = load_data()
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
st.session_state.chat_engine = index.as_chat_engine(streaming=True,chat_mode="condense_question",max_tokens=1500,verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
res_box = st.empty() # Placeholder for the response text
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.stream_chat(prompt)
full_response = ""
for token in response.response_gen:
full_response += "".join(token)
res_box.write(full_response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.VoyageEmbedding",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context",
"llama_index.vector_stores.qdrant.QdrantVectorStore"
] | [((585, 723), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""Courier v{version}"""', 'page_icon': '"""🌎"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'Courier v{version}', page_icon='🌎', layout=\n 'centered', initial_sidebar_state='auto', menu_items=None)\n", (603, 723), True, 'import streamlit as st\n'), ((719, 750), 'streamlit.title', 'st.title', (['f"""Courier v{version}"""'], {}), "(f'Courier v{version}')\n", (727, 750), True, 'import streamlit as st\n'), ((799, 857), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (818, 857), False, 'import logging\n'), ((1019, 1055), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '""""""'], {}), "('VOYAGE_API_KEY', '')\n", (1033, 1055), False, 'import os\n'), ((1071, 1140), 'llama_index.embeddings.VoyageEmbedding', 'VoyageEmbedding', ([], {'model_name': 'model_name', 'voyage_api_key': 'voyage_api_key'}), '(model_name=model_name, voyage_api_key=voyage_api_key)\n', (1086, 1140), False, 'from llama_index.embeddings import VoyageEmbedding\n'), ((1573, 1610), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1590, 1610), True, 'import streamlit as st\n'), ((889, 929), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (910, 929), False, 'import logging\n'), ((1220, 1243), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1241, 1243), True, 'import streamlit as st\n'), ((2878, 2901), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2899, 2901), True, 'import streamlit as st\n'), ((3077, 3107), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (3090, 3107), True, 'import streamlit as st\n'), ((3162, 3231), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3194, 3231), True, 'import streamlit as st\n'), ((858, 877), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (875, 877), False, 'import logging\n'), ((1637, 1687), 'streamlit.spinner', 'st.spinner', ([], {'text': 'f"""Loading Courier v{version} ..."""'}), "(text=f'Loading Courier v{version} ...')\n", (1647, 1687), True, 'import streamlit as st\n'), ((1798, 1915), 'qdrant_client.QdrantClient', 'QdrantClient', ([], {'url': '"""https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io"""', 'api_key': '""""""'}), "(url=\n 'https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io'\n , api_key='')\n", (1810, 1915), False, 'from qdrant_client import QdrantClient\n'), ((2449, 2492), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2475, 2492), False, 'from llama_index import set_global_service_context\n'), ((2516, 2585), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'qdrant_client', 'collection_name': '"""courierv52"""'}), "(client=qdrant_client, collection_name='courierv52')\n", (2533, 2585), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((2612, 2667), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 
'vector_store'}), '(vector_store=vector_store)\n', (2640, 2667), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2684, 2791), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (2715, 2791), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((3318, 3350), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (3333, 3350), True, 'import streamlit as st\n'), ((3360, 3388), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (3368, 3388), True, 'import streamlit as st\n'), ((3525, 3553), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3540, 3553), True, 'import streamlit as st\n'), ((3577, 3587), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3585, 3587), True, 'import streamlit as st\n'), ((3642, 3667), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3652, 3667), True, 'import streamlit as st\n'), ((3696, 3744), 'streamlit.session_state.chat_engine.stream_chat', 'st.session_state.chat_engine.stream_chat', (['prompt'], {}), '(prompt)\n', (3736, 3744), True, 'import streamlit as st\n'), ((4027, 4068), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (4059, 4068), True, 'import streamlit as st\n'), ((1704, 1761), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (1725, 1761), False, 'from llama_index import SimpleDirectoryReader\n'), ((2025, 2074), 'qdrant_client.models.VectorParams', 'VectorParams', ([], {'size': '(1024)', 'distance': 'Distance.EUCLID'}), '(size=1024, distance=Distance.EUCLID)\n', (2037, 2074), False, 'from qdrant_client.models import Distance, VectorParams\n'), ((2160, 2449), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'max_tokens': '(1500)', 'temperature': '(0.5)', 'system_prompt': '"""Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. Always attempt to query database."""'}), "(model='gpt-4', max_tokens=1500, temperature=0.5, system_prompt=\n 'Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. Always attempt to query database.'\n )\n", (2166, 2449), False, 'from llama_index.llms import OpenAI\n')] |
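One caveat in the row above: create_collection raises once the collection already exists, for example when the cached load_data is re-evaluated. A possible guard inside load_data, using the Qdrant client's collection listing, is sketched below:
existing = {c.name for c in qdrant_client.get_collections().collections}
if "courierv52" not in existing:
    qdrant_client.create_collection(
        collection_name="courierv52",
        vectors_config=VectorParams(size=1024, distance=Distance.EUCLID),
    )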
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
from llama_index import ServiceContext
from llama_index import VectorStoreIndex
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores import ChromaVectorStore
from jinja2 import Template
import os  # used by os.chdir / os.path in clas() and get_answer() below
import requests
from decouple import config
import torch
from chromadatabase import load_collection
import nltk
from maps_scraper import *
import pandas as pd
from time import sleep
from context import *
import pyshorteners as ps
nltk.download('stopwords')
from nltk.corpus import stopwords
spanish_stop_words = stopwords.words('spanish')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"PyTorch está utilizando el dispositivo: {device}")
HUGGINGFACE_TOKEN=config('HUGGINGFACE_TOKEN')
def zephyr_instruct_template(messages, add_generation_prompt=True):
    # Define the Jinja template
template_str = "{% for message in messages %}"
template_str += "{% if message['role'] == 'user' %}"
template_str += "<|user|>{{ message['content'] }}</s>\n"
template_str += "{% elif message['role'] == 'assistant' %}"
template_str += "<|assistant|>{{ message['content'] }}</s>\n"
template_str += "{% elif message['role'] == 'system' %}"
template_str += "<|system|>{{ message['content'] }}</s>\n"
template_str += "{% else %}"
template_str += "<|unknown|>{{ message['content'] }}</s>\n"
template_str += "{% endif %}"
template_str += "{% endfor %}"
template_str += "{% if add_generation_prompt %}"
template_str += "<|assistant|>\n"
template_str += "{% endif %}"
    # Create a template object from the template string
template = Template(template_str)
    # Render the template with the provided messages
return template.render(messages=messages, add_generation_prompt=add_generation_prompt)
# Here we make the call to the model
def generate_answer(prompt: str, max_new_tokens: int = 768, ) -> None:
try:
        # Your Hugging Face API key
api_key = config('HUGGINGFACE_TOKEN')
        # Hugging Face API URL for text generation
api_url = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
        # Headers for the request
headers = {"Authorization": f"Bearer {api_key}"}
        # Data to send in the POST request
        # On the parameters: https://huggingface.co/docs/transformers/main_classes/text_generation
data = {
"inputs": prompt,
"parameters": {
"max_new_tokens": max_new_tokens,
"temperature": 0.7,
"top_k": 50,
"top_p": 0.95
}
}
        # Send the POST request
response = requests.post(api_url, headers=headers, json=data)
        # Extract the response
respuesta = response.json()[0]["generated_text"][len(prompt):]
return respuesta
except Exception as e:
print(f"An error occurred: {e}")
# This function prepares the prompt in QA style
def prepare_prompt(query_str: str, nodes: list, user_info: str = None):
TEXT_QA_PROMPT_TMPL = (
"La información del usuario es la siguiente:\n"
"---------------------\n"
"{user_info_str}\n"
"---------------------\n"
"La información de contexto es la siguiente:\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"RESPONDE EN ESPAÑOL. Dada la información de contexto anterior, y sin utilizar conocimiento previo, responde en español la siguiente consulta. En caso de que tu respuesta sea una receta envíala con título, ingredientes, procedimiento. No debes agregar recetas de otros libros ni material adicional. En caso de que la receta pedida no se encuentre en el material provisto debes aclararlo y no enviar receta.\n"
"Pregunta: {query_str}\n"
"Respuesta: "
)
    # Build the context for the question
context_str = ''
for node in nodes:
page_label = node.metadata["page_label"]
file_path = node.metadata["file_path"]
context_str += f"\npage_label: {page_label}\n"
context_str += f"file_path: {file_path}\n\n"
context_str += f"{node.text}\n"
messages = [
{
"role": "system",
"content": "Eres un asistente de cocina útil que siempre responde con respuestas veraces, útiles y basadas en hechos.",
},
{"role": "user", "content": TEXT_QA_PROMPT_TMPL.format(context_str=context_str, query_str=query_str, user_info_str=user_info)},
]
final_prompt = zephyr_instruct_template(messages)
return final_prompt
def load_model():
print('Cargando modelo de embeddings...')
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(
model_name='sentence-transformers/paraphrase-multilingual-mpnet-base-v2',
model_kwargs={'device': 'cuda'},
encode_kwargs={'normalize_embeddings': True}
)
)
print('Indexando documentos...')
chroma_collection = load_collection()
# set up ChromaVectorStore and load in data
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=None)
index = VectorStoreIndex.from_vector_store(
vector_store, storage_context=storage_context, service_context=service_context, show_progress=True
)
retriever = index.as_retriever(similarity_top_k=2)
return retriever
def clas(query_str: str, clasificador, vectorizer, retriever, user_id: int = 0):
os.chdir(os.path.dirname(os.path.abspath(__file__)))
vectorized_query = vectorizer.transform([query_str])
prediction = clasificador.predict(vectorized_query)
if prediction[0] == 1:
context = generate_context(user_id)
answer = get_answer(retriever, query_str, context)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
return answer
else:
resultados=[]
places, keywords, locations = extract_entities(query_str)
obtain_places(places[0], " ".join(keywords), locations[0], query_str)
        # Select the first 5 restaurants
df = pd.read_csv(os.path.dirname(os.path.abspath(__file__))+"/tabular_data/" + query_str.replace(" ","") + ".csv")
primeros_5 = df.head(5)
        # Generate the written result
for index, restaurante in primeros_5.iterrows():
resultado_escrito = ""
resultado_escrito += f"Restaurante: {restaurante['name']}\n"
resultado_escrito += f"Enlace: {ps.Shortener().tinyurl.short(restaurante['link'])}\n"
resultado_escrito += f"Calificación: {restaurante['rating']}\n"
resultado_escrito += f"Dirección: {restaurante['address']}\n\n"
resultados.append(resultado_escrito)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
return resultados
def get_answer(retriever, query_str:str, context: str = None):
os.chdir(os.path.dirname(os.path.abspath(__file__)))
nodes = retriever.retrieve(query_str)
final_prompt = prepare_prompt(query_str, nodes, context)
return generate_answer(final_prompt) | [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((579, 605), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (592, 605), False, 'import nltk\n'), ((662, 688), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""spanish"""'], {}), "('spanish')\n", (677, 688), False, 'from nltk.corpus import stopwords\n'), ((838, 865), 'decouple.config', 'config', (['"""HUGGINGFACE_TOKEN"""'], {}), "('HUGGINGFACE_TOKEN')\n", (844, 865), False, 'from decouple import config\n'), ((1760, 1782), 'jinja2.Template', 'Template', (['template_str'], {}), '(template_str)\n', (1768, 1782), False, 'from jinja2 import Template\n'), ((5108, 5125), 'chromadatabase.load_collection', 'load_collection', ([], {}), '()\n', (5123, 5125), False, 'from chromadatabase import load_collection\n'), ((5194, 5248), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (5211, 5248), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5271, 5326), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (5299, 5326), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((5349, 5412), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'None'}), '(embed_model=embed_model, llm=None)\n', (5377, 5412), False, 'from llama_index import ServiceContext\n'), ((5425, 5564), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': '(True)'}), '(vector_store, storage_context=\n storage_context, service_context=service_context, show_progress=True)\n', (5459, 5564), False, 'from llama_index import VectorStoreIndex\n'), ((722, 747), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (745, 747), False, 'import torch\n'), ((2112, 2139), 'decouple.config', 'config', (['"""HUGGINGFACE_TOKEN"""'], {}), "('HUGGINGFACE_TOKEN')\n", (2118, 2139), False, 'from decouple import config\n'), ((2853, 2903), 'requests.post', 'requests.post', (['api_url'], {'headers': 'headers', 'json': 'data'}), '(api_url, headers=headers, json=data)\n', (2866, 2903), False, 'import requests\n'), ((4836, 5023), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/paraphrase-multilingual-mpnet-base-v2"""', 'model_kwargs': "{'device': 'cuda'}", 'encode_kwargs': "{'normalize_embeddings': True}"}), "(model_name=\n 'sentence-transformers/paraphrase-multilingual-mpnet-base-v2',\n model_kwargs={'device': 'cuda'}, encode_kwargs={'normalize_embeddings':\n True})\n", (4857, 5023), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((6757, 6771), 'pyshorteners.Shortener', 'ps.Shortener', ([], {}), '()\n', (6769, 6771), True, 'import pyshorteners as ps\n')] |
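A quick check of the prompt builder in the row above: rendering a short exchange shows the Zephyr chat markup it produces (the messages are illustrative only):
example_messages = [
    {"role": "system", "content": "Eres un asistente de cocina útil."},
    {"role": "user", "content": "Dame una receta de tortilla de patatas."},
]
print(zephyr_instruct_template(example_messages))
# Expected output:
# <|system|>Eres un asistente de cocina útil.</s>
# <|user|>Dame una receta de tortilla de patatas.</s>
# <|assistant|>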
#! coding: utf-8
import os
from dataclasses import dataclass
from typing import List, Dict, Optional
from llama_index import ServiceContext, get_response_synthesizer, VectorStoreIndex, StorageContext, \
load_indices_from_storage, TreeIndex
from llama_index.indices.base import BaseIndex
from llama_index.indices.postprocessor import LLMRerank
from llama_index.indices.tree.base import TreeRetrieverMode
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.response_synthesizers import ResponseMode, BaseSynthesizer
from common.config import index_dir
from common.prompt import CH_CHOICE_SELECT_PROMPT, CH_TREE_SUMMARIZE_PROMPT
from query_todo.retrievers import MultiRetriever
def load_index(title: str, service_context: ServiceContext = None) -> List[BaseIndex]:
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(index_dir, title))
return load_indices_from_storage(
storage_context=storage_context,
service_context=service_context,
)
def load_indices(service_context: ServiceContext) -> Dict[str, List[BaseIndex]]:
indices: Dict[str, List[BaseIndex]] = {}
for title in os.listdir(index_dir):
indices[title] = load_index(title, service_context)
return indices
def create_response_synthesizer(service_context: ServiceContext = None) -> BaseSynthesizer:
# TODO
# https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/root.html#get-started
raise NotImplementedError
@dataclass
class DocumentQueryEngineFactory:
indices: List[BaseIndex]
summary: Optional[str] = ""
def first_index(self):
return self.indices[0]
def create_retrievers(self):
# TODO
        # Create multiple retrievers based on the indices
# https://docs.llamaindex.ai/en/stable/understanding/querying/querying.html#customizing-the-stages-of-querying
raise NotImplementedError
def doc_store(self):
return self.indices[0].docstore
def create_query_engine(self, service_context: ServiceContext) -> RetrieverQueryEngine:
# TODO
        # Combine the retriever, llm_rerank and response_synthesizer into a complete query engine
# https://docs.llamaindex.ai/en/stable/understanding/querying/querying.html
raise NotImplementedError
| [
"llama_index.load_indices_from_storage"
] | [((899, 995), 'llama_index.load_indices_from_storage', 'load_indices_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (924, 995), False, 'from llama_index import ServiceContext, get_response_synthesizer, VectorStoreIndex, StorageContext, load_indices_from_storage, TreeIndex\n'), ((1159, 1180), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1169, 1180), False, 'import os\n'), ((856, 886), 'os.path.join', 'os.path.join', (['index_dir', 'title'], {}), '(index_dir, title)\n', (868, 886), False, 'import os\n')] |
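The row above leaves its TODOs unimplemented. One possible shape for them is sketched below; it is not the exercise's reference solution, and it assumes MultiRetriever simply wraps the list returned by create_retrievers:
def create_response_synthesizer(service_context: ServiceContext = None) -> BaseSynthesizer:
    # Tree-summarize over retrieved nodes, using the Chinese summary prompt imported above.
    return get_response_synthesizer(
        service_context=service_context,
        response_mode=ResponseMode.TREE_SUMMARIZE,
        summary_template=CH_TREE_SUMMARIZE_PROMPT,
    )

# Inside DocumentQueryEngineFactory, create_query_engine could then combine the pieces:
#     retriever = MultiRetriever(self.create_retrievers())  # assumed constructor
#     reranker = LLMRerank(
#         choice_select_prompt=CH_CHOICE_SELECT_PROMPT,
#         top_n=3,
#         service_context=service_context,
#     )
#     return RetrieverQueryEngine(
#         retriever=retriever,
#         response_synthesizer=create_response_synthesizer(service_context),
#         node_postprocessors=[reranker],
#     )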
import logging
import glob
from pathlib import Path
from llama_index import (
SimpleDirectoryReader,
download_loader
)
class DataReader:
__LOGGER_NAME = "data_reader"
__SIMPLE_SUPPORTED_EXTENSIONS = [".csv", ".docx", ".epub", ".hwp", ".ipynb", ".jpeg", ".mbox", ".md", ".mp3", ".pdf", ".png", ".pptm", ".pptx"]
__JSON_READER_LOADER = "JSONReader"
__WIKIPEDIA_READER_LOADER = "WikipediaReader"
def __init__(self, data_dir):
# Set logger
self.logger = logging.getLogger(self.__LOGGER_NAME)
# Data directory and files to load
self.data_dir = data_dir
def load(self):
"""
Loads the documents from all the given directories.
:return: List of llama-index documents
"""
documents = []
if self.data_dir is not None:
loaders = [
self.__load_simple,
self.__load_json,
self.__load_wiki
]
self.logger.info(f"Loading documents from {self.data_dir} directory ...")
for load in loaders:
documents.extend(load())
self.logger.info(f"Loaded {len(documents)} documents")
else:
self.logger.info("No data directory specified, skipping loading documents")
return documents
def __load_simple(self):
"""
Loads the documents from the given data directory only for supported file types.
The best file reader will be automatically selected from the given file extensions.
Docs: https://docs.llamaindex.ai/en/stable/module_guides/loading/simpledirectoryreader.html#supported-file-types
:return: List of llama-index documents
"""
self.logger.debug(f"Loading simple documents ...")
documents = SimpleDirectoryReader(
input_dir=self.data_dir,
required_exts=self.__SIMPLE_SUPPORTED_EXTENSIONS
).load_data()
self.logger.debug(f"Loaded {len(documents)} documents")
return documents
def __load_json(self):
"""
Loads the JSON documents from the given data directory.
:return: List of llama-index documents
"""
json_files = self.__get_all_files_with_ext("json")
JSONReader = download_loader(self.__JSON_READER_LOADER)
loader = JSONReader()
self.logger.debug(f"Loading JSON documents ...")
documents = []
for json_file in json_files:
documents.extend(loader.load_data(Path(json_file), is_jsonl=False))
self.logger.debug(f"Loaded {len(documents)} JSON documents")
return documents
def __load_wiki(self):
"""
Loads the wikipedia pages from the given data directory.
:return: List of llama-index documents
"""
wiki_files = self.__get_all_files_with_ext("wikipedia")
wiki_pages = []
for wiki_file in wiki_files:
wiki_pages.extend(self.__get_pages(wiki_file))
WikipediaReader = download_loader(self.__WIKIPEDIA_READER_LOADER)
loader = WikipediaReader()
self.logger.debug(f"Loading Wikipedia pages ...")
documents = loader.load_data(pages=wiki_pages)
self.logger.debug(f"Loaded {len(documents)} Wikipedia documents")
return documents
def __get_all_files_with_ext(self, file_ext):
"""
Gets all the files with the given extension from the data directory.
:param file_ext: The file extension to search for
:return: List of file paths
"""
return glob.glob(f"{self.data_dir}/*.{file_ext}")
@staticmethod
def __get_pages(file_path):
"""
Reads the pages/links/documents from the given file path.
:param file_path: The path to the file containing the pages
:return: List of pages
"""
with open(file_path, "r") as f:
links = f.readlines()
return links
| [
"llama_index.SimpleDirectoryReader",
"llama_index.download_loader"
] | [((498, 535), 'logging.getLogger', 'logging.getLogger', (['self.__LOGGER_NAME'], {}), '(self.__LOGGER_NAME)\n', (515, 535), False, 'import logging\n'), ((2288, 2330), 'llama_index.download_loader', 'download_loader', (['self.__JSON_READER_LOADER'], {}), '(self.__JSON_READER_LOADER)\n', (2303, 2330), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3031, 3078), 'llama_index.download_loader', 'download_loader', (['self.__WIKIPEDIA_READER_LOADER'], {}), '(self.__WIKIPEDIA_READER_LOADER)\n', (3046, 3078), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3591, 3633), 'glob.glob', 'glob.glob', (['f"""{self.data_dir}/*.{file_ext}"""'], {}), "(f'{self.data_dir}/*.{file_ext}')\n", (3600, 3633), False, 'import glob\n'), ((1809, 1910), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'self.data_dir', 'required_exts': 'self.__SIMPLE_SUPPORTED_EXTENSIONS'}), '(input_dir=self.data_dir, required_exts=self.\n __SIMPLE_SUPPORTED_EXTENSIONS)\n', (1830, 1910), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((2525, 2540), 'pathlib.Path', 'Path', (['json_file'], {}), '(json_file)\n', (2529, 2540), False, 'from pathlib import Path\n')] |
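Minimal usage of the DataReader above, assuming a ./data directory exists; INFO logging makes the per-loader progress visible:
import logging

logging.basicConfig(level=logging.INFO)
documents = DataReader("data").load()
print(f"{len(documents)} document(s) ready for indexing")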
from typing import List
from llama_index.core import Document
from llama_index.core.readers.json import JSONReader
def get_content_from_json_file(json_file_path: str, source_type: str, is_jsonl: bool = False) -> List[Document]:
# https://docs.llamaindex.ai/en/stable/api_reference/readers.html
loader = JSONReader(is_jsonl=is_jsonl, levels_back=0, collapse_length=1000)
docs = loader.load_data(json_file_path)
for doc in docs:
doc.metadata["source_id"] = json_file_path
doc.metadata["source_type"] = source_type
return docs
#, extra_info={"source_id": json_file_path})
| [
"llama_index.core.readers.json.JSONReader"
] | [((312, 378), 'llama_index.core.readers.json.JSONReader', 'JSONReader', ([], {'is_jsonl': 'is_jsonl', 'levels_back': '(0)', 'collapse_length': '(1000)'}), '(is_jsonl=is_jsonl, levels_back=0, collapse_length=1000)\n', (322, 378), False, 'from llama_index.core.readers.json import JSONReader\n')] |
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext
from llama_index.llms import LlamaCPP
llm = LlamaCPP(model_path="./models/llama-2-13b-chat.Q4_0.gguf")
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
set_global_service_context(service_context=service_context)
documents = SimpleDirectoryReader('data').load_data()
index = VectorStoreIndex.from_documents(documents, show_progress=True)
query_engine = index.as_query_engine()
response = query_engine.query("¿Qué tal es la carrera de Administración de Empresas?")
print(response)
response = query_engine.query("¿Qué tal es la carrera de Comercio Exterior?")
print(response)
index.storage_context.persist("./storage")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.LlamaCPP",
"llama_index.set_global_service_context"
] | [((167, 225), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': '"""./models/llama-2-13b-chat.Q4_0.gguf"""'}), "(model_path='./models/llama-2-13b-chat.Q4_0.gguf')\n", (175, 225), False, 'from llama_index.llms import LlamaCPP\n'), ((242, 263), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (254, 263), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((283, 340), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (311, 340), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((342, 401), 'llama_index.set_global_service_context', 'set_global_service_context', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (368, 401), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((466, 528), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (497, 528), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((415, 444), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (436, 444), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n')] |
import os
from llama_index import ServiceContext, ComposableGraph, TreeIndex, VectorStoreIndex, get_response_synthesizer, \
StorageContext, load_indices_from_storage
from llama_index.response_synthesizers import ResponseMode
from common.config import index_dir
from common.llm import create_llm
from common.prompt import CH_QUERY_PROMPT, CH_TREE_SUMMARIZE_PROMPT
from common.utils import find_typed
service_context = ServiceContext.from_defaults(llm=create_llm())
titles = ["北京市", "上海市"]
summaries = []
indices = []
for city in titles:
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(index_dir, city))
city_indices = load_indices_from_storage(
storage_context=storage_context,
service_context=service_context,
)
summary = f"""
此内容包含关于{city}的维基百科文章。
如果您需要查找有关{city}的具体事实,请使用此索引。"
如果您想分析多个城市,请不要使用此索引。
"""
index = find_typed(city_indices, VectorStoreIndex)
indices.append(index)
summaries.append(summary)
graph = ComposableGraph.from_indices(
TreeIndex,
indices,
summaries)
query_engine = graph.as_query_engine(
response_synthesizer=get_response_synthesizer(
response_mode=ResponseMode.TREE_SUMMARIZE,
summary_template=CH_TREE_SUMMARIZE_PROMPT,
service_context=service_context,
),
service_context=service_context,
query_template=CH_QUERY_PROMPT,
)
print(query_engine.query("北京气候如何"))
print(query_engine.query("深圳在中国什么位置"))
| [
"llama_index.load_indices_from_storage",
"llama_index.get_response_synthesizer",
"llama_index.ComposableGraph.from_indices"
] | [((1018, 1077), 'llama_index.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['TreeIndex', 'indices', 'summaries'], {}), '(TreeIndex, indices, summaries)\n', (1046, 1077), False, 'from llama_index import ServiceContext, ComposableGraph, TreeIndex, VectorStoreIndex, get_response_synthesizer, StorageContext, load_indices_from_storage\n'), ((656, 752), 'llama_index.load_indices_from_storage', 'load_indices_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (681, 752), False, 'from llama_index import ServiceContext, ComposableGraph, TreeIndex, VectorStoreIndex, get_response_synthesizer, StorageContext, load_indices_from_storage\n'), ((911, 953), 'common.utils.find_typed', 'find_typed', (['city_indices', 'VectorStoreIndex'], {}), '(city_indices, VectorStoreIndex)\n', (921, 953), False, 'from common.utils import find_typed\n'), ((457, 469), 'common.llm.create_llm', 'create_llm', ([], {}), '()\n', (467, 469), False, 'from common.llm import create_llm\n'), ((1154, 1301), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'ResponseMode.TREE_SUMMARIZE', 'summary_template': 'CH_TREE_SUMMARIZE_PROMPT', 'service_context': 'service_context'}), '(response_mode=ResponseMode.TREE_SUMMARIZE,\n summary_template=CH_TREE_SUMMARIZE_PROMPT, service_context=service_context)\n', (1178, 1301), False, 'from llama_index import ServiceContext, ComposableGraph, TreeIndex, VectorStoreIndex, get_response_synthesizer, StorageContext, load_indices_from_storage\n'), ((606, 635), 'os.path.join', 'os.path.join', (['index_dir', 'city'], {}), '(index_dir, city)\n', (618, 635), False, 'import os\n')] |
import json
import logging
from typing import Any, Callable, Dict, List, Mapping, Optional, cast
from llama_index.vector_stores.types import (
    MetadataFilters,
    VectorStore,
    VectorStoreQuery,
    VectorStoreQueryResult,
)
from llama_index.schema import (
    BaseNode,
    ImageNode,
    IndexNode,
    NodeRelationship,
    RelatedNodeInfo,
    TextNode,
)
from llama_index.vector_stores.utils import node_to_metadata_dict
from byzerllm.utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from byzerllm.apps.llama_index.simple_retrieval import SimpleRetrieval
logger = logging.getLogger(__name__)
def _build_metadata_filter_fn(
metadata_lookup_fn: Callable[[str], Mapping[str, Any]],
metadata_filters: Optional[MetadataFilters] = None,
) -> Callable[[str], bool]:
"""Build metadata filter function."""
filter_list = metadata_filters.legacy_filters() if metadata_filters else []
if not filter_list:
return lambda _: True
def filter_fn(node_id: str) -> bool:
metadata = metadata_lookup_fn(node_id)
for filter_ in filter_list:
metadata_value = metadata.get(filter_.key, None)
if metadata_value is None:
return False
elif isinstance(metadata_value, list):
if filter_.value not in metadata_value:
return False
elif isinstance(metadata_value, (int, float, str, bool)):
if metadata_value != filter_.value:
return False
return True
return filter_fn
def metadata_dict_to_node(metadata: dict, text: Optional[str] = None) -> BaseNode:
"""Common logic for loading Node data from metadata dict."""
node_json = metadata.get("_node_content", None)
node_type = metadata.get("_node_type", None)
if node_json is None:
raise ValueError("Node content not found in metadata dict.")
node: BaseNode
if node_type == IndexNode.class_name():
node = IndexNode.parse_raw(node_json)
elif node_type == ImageNode.class_name():
node = ImageNode.parse_raw(node_json)
else:
node = TextNode.parse_raw(node_json)
if text is not None:
node.set_content(text)
return node
class ByzerAIVectorStore(VectorStore):
stores_text: bool = True
def __init__(
self,
llm:ByzerLLM,
retrieval:ByzerRetrieval,
chunk_collection: Optional[str] = "default",
**kwargs: Any,
) -> None:
self._llm = llm
        self._retrieval = SimpleRetrieval(llm=llm, retrieval=retrieval, chunk_collection=chunk_collection, **kwargs)
@property
def client(self) -> None:
"""Get client."""
return
def get(self, text_id: str) -> List[float]:
"""Get embedding."""
v = self._retrieval.get_chunk_by_id(text_id)
if len(v) == 0:
return []
return v[0]["chunk_vector"]
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to index."""
v = []
count = 0
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=True, flat_metadata=False
)
metadata.pop("_node_content", None)
m = {
"chunk_id": node.node_id,
"ref_doc_id": node.ref_doc_id,
"metadata": node.metadata,
"chunk_embedding": node.get_embedding(),
"chunk_content": node.get_content(),
"owner":""
}
v.append(m)
self._retrieval.save_chunks(v)
self._retrieval.commit_chunk()
return [node.node_id for node in nodes]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
chunks = self._retrieval.get_chunks_by_docid(ref_doc_id)
self._retrieval.delete_by_ids([chunk["_id"] for chunk in chunks])
self._retrieval.commit_chunk()
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Get nodes for response."""
query_embedding = cast(List[float], query.query_embedding)
chunks = self._retrieval.search_content_chunks(owner="default",
query_str=query.query_str,
query_embedding=query_embedding,
doc_ids=query.node_ids,
limit=100,
return_json=False)
chunks_map = {}
for chunk in chunks:
chunk["metadata"] = json.loads(chunk["json_data"])
chunks_map[chunk["_id"]] = chunk["metadata"]
query_filter_fn = _build_metadata_filter_fn(
lambda node_id: chunks_map[node_id], query.filters
)
top_similarities = []
top_ids = []
counter = query.similarity_top_k
nodes = []
for chunk in chunks:
if query_filter_fn(chunk["_id"]):
if counter <= 0:
break
top_similarities.append(chunk["_score"])
top_ids.append(chunk["_id"])
try:
node = metadata_dict_to_node({"_node_content": chunk["metadata"]})
node.text = chunk["chunk"]
except Exception:
# TODO: Legacy support for old metadata format
node = TextNode(
text=chunk["raw_chunk"],
id_=chunk["_id"],
embedding=None,
metadata=chunk["metadata"],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=chunk["doc_id"])
},
)
nodes.append(node)
counter -= 1
        return VectorStoreQueryResult(nodes=nodes, similarities=top_similarities, ids=top_ids)
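# --- Added sketch (editor's illustration, not from the original source) ---
# A small runnable check of the metadata_dict_to_node helper defined above: serialize a
# TextNode with node_to_metadata_dict and round-trip it back; the text and id values are
# placeholders.
if __name__ == "__main__":
    example_node = TextNode(text="round-trip example", id_="example-node-id")
    example_meta = node_to_metadata_dict(example_node, remove_text=True, flat_metadata=False)
    restored = metadata_dict_to_node(example_meta, text="round-trip example")
    print(type(restored).__name__, restored.node_id)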
| [
"llama_index.schema.ImageNode.parse_raw",
"llama_index.schema.TextNode.parse_raw",
"llama_index.vector_stores.utils.node_to_metadata_dict",
"llama_index.schema.ImageNode.class_name",
"llama_index.schema.IndexNode.parse_raw",
"llama_index.vector_stores.types.VectorStoreQueryResult",
"llama_index.schema.IndexNode.class_name",
"llama_index.schema.RelatedNodeInfo"
] | [((660, 687), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (677, 687), False, 'import logging\n'), ((2017, 2039), 'llama_index.schema.IndexNode.class_name', 'IndexNode.class_name', ([], {}), '()\n', (2037, 2039), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n'), ((2056, 2086), 'llama_index.schema.IndexNode.parse_raw', 'IndexNode.parse_raw', (['node_json'], {}), '(node_json)\n', (2075, 2086), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n'), ((2682, 2777), 'byzerllm.apps.llama_index.simple_retrieval.SimpleRetrieval', 'SimpleRetrieval', ([], {'llm': 'llm', 'retrieval': 'retrieval', 'chunk_collection': 'chunk_collection'}), '(llm=llm, retrieval=retrieval, chunk_collection=\n chunk_collection, **kwargs)\n', (2697, 2777), False, 'from byzerllm.apps.llama_index.simple_retrieval import SimpleRetrieval\n'), ((4646, 4686), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (4650, 4686), False, 'from typing import Any, Callable, Dict, List, Mapping, Optional, cast\n'), ((6630, 6709), 'llama_index.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'nodes', 'similarities': 'top_similarities', 'ids': 'top_ids'}), '(nodes=nodes, similarities=top_similarities, ids=top_ids)\n', (6652, 6709), False, 'from llama_index.vector_stores.types import MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult\n'), ((2109, 2131), 'llama_index.schema.ImageNode.class_name', 'ImageNode.class_name', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n'), ((2148, 2178), 'llama_index.schema.ImageNode.parse_raw', 'ImageNode.parse_raw', (['node_json'], {}), '(node_json)\n', (2167, 2178), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n'), ((2204, 2233), 'llama_index.schema.TextNode.parse_raw', 'TextNode.parse_raw', (['node_json'], {}), '(node_json)\n', (2222, 2233), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n'), ((3352, 3418), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': '(False)'}), '(node, remove_text=True, flat_metadata=False)\n', (3373, 3418), False, 'from llama_index.vector_stores.utils import node_to_metadata_dict\n'), ((5243, 5273), 'json.loads', 'json.loads', (["chunk['json_data']"], {}), "(chunk['json_data'])\n", (5253, 5273), False, 'import json\n'), ((6444, 6484), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "chunk['doc_id']"}), "(node_id=chunk['doc_id'])\n", (6459, 6484), False, 'from llama_index.schema import BaseNode, ImageNode, IndexNode, NodeRelationship, RelatedNodeInfo, TextNode\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import openai
import yaml
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import SpacyTextSplitter
from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader
from llama_index.node_parser import SimpleNodeParser
'''
Summarize an article with llama_index.
In this example the text is split into chunks and summarized bottom-up along a tree structure.
Uses the spaCy Chinese tokenizer model:
python -m spacy download zh_core_web_sm
'''
def get_api_key():
with open("config.yaml", "r", encoding="utf-8") as yaml_file:
yaml_data = yaml.safe_load(yaml_file)
openai.api_key = yaml_data["openai"]["api_key"]
if __name__ == '__main__':
get_api_key()
    # Configure the LLM
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=1024))
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
    # Tokenizer model; at most 2048 tokens per chunk
    text_splitter = SpacyTextSplitter(pipeline="zh_core_web_sm", chunk_size=2048)
    parser = SimpleNodeParser(text_splitter=text_splitter)
    # Load the corpus from the directory
    documents = SimpleDirectoryReader('./data/mr_fujino').load_data()
    # Get the nodes from the corpus
    nodes = parser.get_nodes_from_documents(documents)
    # The simplest index structure: GPTListIndex
    list_index = GPTListIndex(nodes=nodes, service_context=service_context)
    # Summarize bottom-up along the tree structure
    # Query (in Chinese): "Below is content written by Mr. Lu Xun in the first person; please summarize it in Chinese:"
    response = list_index.query("下面鲁迅先生以第一人称‘我’写的内容,请你用中文总结一下:", response_mode="tree_summarize")
print(response)
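# --- Added sketch (editor's illustration, not from the original source) ---
# The SpacyTextSplitter above requires the zh_core_web_sm model (see the note in the
# module docstring). A hedged helper that downloads the model on first use, so the
# script does not fail with an OSError when the model is missing:
def ensure_spacy_model(name: str = "zh_core_web_sm") -> None:
    import spacy
    try:
        spacy.load(name)
    except OSError:
        from spacy.cli import download
        download(name)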
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTListIndex",
"llama_index.node_parser.SimpleNodeParser"
] | [((803, 860), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (831, 860), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n'), ((906, 967), 'langchain.text_splitter.SpacyTextSplitter', 'SpacyTextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': '(2048)'}), "(pipeline='zh_core_web_sm', chunk_size=2048)\n", (923, 967), False, 'from langchain.text_splitter import SpacyTextSplitter\n'), ((983, 1028), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'text_splitter': 'text_splitter'}), '(text_splitter=text_splitter)\n', (999, 1028), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1234, 1292), 'llama_index.GPTListIndex', 'GPTListIndex', ([], {'nodes': 'nodes', 'service_context': 'service_context'}), '(nodes=nodes, service_context=service_context)\n', (1246, 1292), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n'), ((530, 555), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], {}), '(yaml_file)\n', (544, 555), False, 'import yaml\n'), ((709, 779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(1024)'}), "(temperature=0, model_name='gpt-3.5-turbo', max_tokens=1024)\n", (719, 779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1061, 1102), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/mr_fujino"""'], {}), "('./data/mr_fujino')\n", (1082, 1102), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n')] |
from dotenv import load_dotenv
import os
from llama_index import SimpleDirectoryReader
from llama_index.llms import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index import (
download_loader,
ServiceContext,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores import PineconeVectorStore
from llama_index.node_parser import SimpleNodeParser
import pinecone
load_dotenv()
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENVIRONMENT"]
)
if __name__ == "__main__":
print("Goining to ingest pinecone documentation")
# this will remove the html tag and only clean data will be availabe
UnstructuredReader = download_loader("UnstructuredReader")
dir_reader = SimpleDirectoryReader(
input_dir="./llamaindex-docs", file_extractor={".htm1": UnstructuredReader()}
)
documents = dir_reader.load_data()
node_parser = SimpleNodeParser.from_defaults(chunk_size=200, chunk_overlap=20)
# nodes = node_parser.get_nodes_from_documents(documents=documents)
llm = OpenAI(model="gpt-3.5 turbo", temperature=0)
embed_model = OpenAIEmbedding(model="text-embedding-ada-002", embed_batch_size=100)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_model, node_parser=node_parser
)
# pinecone index name
index_name = "documentation-helper"
    pinecone_index = pinecone.Index(index_name=index_name)
    vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(
        documents=documents,
        service_context=service_context,
        storage_context=storage_context,
        show_progress=True,
    )
print("finish ingesting...")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((456, 469), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (467, 469), False, 'from dotenv import load_dotenv\n'), ((470, 567), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINEECONE_API_KEY']", 'Environment': "['PINECONE_ENVIRONMENT']"}), "(api_key=os.environ['PINEECONE_API_KEY'], Environment=[\n 'PINECONE_ENVIRONMENT'])\n", (483, 567), False, 'import pinecone\n'), ((748, 785), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (763, 785), False, 'from llama_index import download_loader, ServiceContext, VectorStoreIndex, StorageContext\n'), ((976, 1040), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(200)', 'chunk_overlap': '(20)'}), '(chunk_size=200, chunk_overlap=20)\n', (1006, 1040), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1125, 1169), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5 turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5 turbo', temperature=0)\n", (1131, 1169), False, 'from llama_index.llms import OpenAI\n'), ((1188, 1257), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (1203, 1257), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1280, 1372), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (1308, 1372), False, 'from llama_index import download_loader, ServiceContext, VectorStoreIndex, StorageContext\n'), ((1470, 1507), 'pinecone.index', 'pinecone.index', ([], {'index_name': 'index_name'}), '(index_name=index_name)\n', (1484, 1507), False, 'import pinecone\n'), ((1527, 1577), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1546, 1577), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((1600, 1655), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1628, 1655), False, 'from llama_index import download_loader, ServiceContext, VectorStoreIndex, StorageContext\n'), ((1669, 1812), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context', 'storage_context': 'Storage_Context', 'show_progress': '(True)'}), '(documents=documents, service_context=\n service_context, storage_context=Storage_Context, show_progress=True)\n', (1700, 1812), False, 'from llama_index import download_loader, ServiceContext, VectorStoreIndex, StorageContext\n')] |
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.multi_modal import MultiModalVectorIndexRetriever
from llama_index.core.indices.query.base import BaseQueryEngine
from llama_index.core.indices.query.schema import QueryBundle, QueryType
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from llama_index.core.query_engine.citation_query_engine import CITATION_QA_TEMPLATE
from llama_index.core.prompts import PromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import ImageNode, NodeWithScore
from mm_retriever import MultiModalQdrantRetriever
# Rewritten version of llama_index's CITATION_QA_TEMPLATE, used as the default text QA prompt below
TEXT_QA_TEMPLATE = PromptTemplate(
"Please provide an answer based solely on the provided sources. "
"When referencing information from a source, "
"cite the appropriate source(s) using their corresponding numbers. "
"Every answer should include at least one source citation. "
"Only cite a source when you are explicitly referencing it. "
"If none of the sources are helpful, you should indicate that. "
"Below are several numbered sources of information:"
"\n------\n"
"{context_str}"
"\n------\n"
"Query: {query_str}\n"
"Answer: "
)
IMAGE_QA_TEMPLATE = PromptTemplate(
"<image>\n"
"Caption: {context_str}"
"\n------\n"
"You are a smart agent who can answer questions based on external information. "
"Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. "
"If the image is not helpful, you should indicate that. \n"
"Query: {query_str}\n"
"Note: Don't include expressions like \"This image appears to be XXX\" in your answer.\n"
"Answer: "
)
ANSWER_INTEGRATION_TEMPLATE = PromptTemplate(
"With the following sources related to your question from my knowledge base: \n"
"\n"+"-"*50+"\n"
"Paragraphs:\n\n"
"{context_str}\n"
"\nImages:\n"
"{image_context_str}\n"
"\n"+"-"*50+"\n"
"Here is my answer:\n"
"\n{text_context_response}\n{image_context_response}"
)
# def _get_image_and_text_nodes(
# nodes: List[NodeWithScore],
# ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
# image_nodes = []
# text_nodes = []
# for res_node in nodes:
# if isinstance(res_node.node, ImageNode):
# image_nodes.append(res_node)
# else:
# text_nodes.append(res_node)
# return image_nodes, text_nodes
class CustomMultiModalQueryEngine(BaseQueryEngine):
"""Simple Multi Modal Retriever query engine.
Assumes that retrieved text context fits within context window of LLM, along with images.
Args:
retriever (MultiModalVectorIndexRetriever): A retriever object.
multi_modal_llm (Optional[MultiModalLLM]): MultiModalLLM Models.
text_qa_template (Optional[BasePromptTemplate]): Text QA Prompt Template.
image_qa_template (Optional[BasePromptTemplate]): Image QA Prompt Template.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Node Postprocessors.
callback_manager (Optional[CallbackManager]): A callback manager.
"""
def __init__(
self,
retriever: MultiModalQdrantRetriever,
multi_modal_llm: MultiModalLLM,
text_qa_template: Optional[BasePromptTemplate] = None,
image_qa_template: Optional[BasePromptTemplate] = None,
answer_integration_template: Optional[BasePromptTemplate] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
self._retriever = retriever
self._multi_modal_llm = multi_modal_llm
        self._text_qa_template = text_qa_template or TEXT_QA_TEMPLATE  # default to the rewritten citation prompt defined above
self._image_qa_template = image_qa_template or IMAGE_QA_TEMPLATE
self._answer_integration_template = answer_integration_template or ANSWER_INTEGRATION_TEMPLATE
self._node_postprocessors = node_postprocessors or []
callback_manager = callback_manager or CallbackManager([])
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {"text_qa_template": self._text_qa_template}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
def retrieve(self,
query_bundle: QueryBundle,
text_query_mode: str = "hybrid",
image_query_mode: str = "default",
metadata_filters = None) -> Dict[str, List[NodeWithScore]]:
text_retrieval_result = self._retriever.retrieve_text_nodes(query_bundle, text_query_mode, metadata_filters)
image_retrieval_result = self._retriever.retrieve_image_nodes(query_bundle, image_query_mode, metadata_filters)
reranked_text_nodes = self._retriever.rerank_text_nodes(query_bundle, text_retrieval_result)
reranked_image_nodes = self._retriever.rerank_image_nodes(query_bundle, image_retrieval_result)
retrieval_results = {
"text_nodes": self._apply_node_postprocessors(reranked_text_nodes, query_bundle=query_bundle),
"image_nodes": self._apply_node_postprocessors(reranked_image_nodes, query_bundle=query_bundle),
}
return retrieval_results
# async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
# nodes = await self._retriever.aretrieve(query_bundle)
# return self._apply_node_postprocessors(nodes, query_bundle=query_bundle)
def synthesize(
self,
query_bundle: QueryBundle,
#nodes: List[NodeWithScore],
retrieval_results: Dict[str, List[NodeWithScore]],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
#image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
image_nodes, text_nodes = retrieval_results["image_nodes"], retrieval_results["text_nodes"]
#TODO: format prompt with (text context), (image + caption of image)
context_str = "\n\n".join([f"Source {text_nodes.index(r)+1}:\n" + r.get_content() for r in text_nodes])
fmt_prompt = self._text_qa_template.format(
context_str=context_str,
query_str=query_bundle.query_str,
)
image_context_str = "\n\n".join([r.get_content() for r in image_nodes])
image_query_fmt_prompt = self._image_qa_template.format(context_str=image_context_str, query_str=query_bundle.query_str)
text_context_messages = [
{
"role": "user",
"content":[
{"type":"text", "text":fmt_prompt}
]
}
]
## Generate response when the mllm(llava) is under llamacpp framework
##TODO: handle multiple image input
image_url = f"data:image/png;base64,{image_nodes[0].node.image.decode('utf-8')}"
image_context_messages = [
{
"role": "user",
"content": [
{"type": "image_url", "image_url": {"url": image_url}},
{"type": "text", "text": image_query_fmt_prompt}
]
}
]
text_context_response = self._multi_modal_llm.chat(
messages=text_context_messages,
)
image_context_response = self._multi_modal_llm.chat(
messages=image_context_messages,
)
## Generate response when the mllm(llava) is under ollama framework
# text_context_response = self._multi_modal_llm.complete(
# prompt=fmt_prompt,
# images=[],
# )
# image_context_response = self._multi_modal_llm.complete(
# prompt=image_query_fmt_prompt,
# images=[image_node.node.image for image_node in image_nodes],
# )
#TODO: transform encoded base64 image to image object in GUI
synthesized_response = self._answer_integration_template.format(
context_str=context_str,
image_context_str= "\n\n".join(["<image>"+ str(r.node.image) + '</image>\n' + r.node.get_content() for r in image_nodes]),
text_context_response=text_context_response.text.replace("\n"," ").strip(),
            image_context_response=image_context_response.text.replace("\n", " ").strip(),
)
return Response(
response=str(synthesized_response),
source_nodes=text_nodes+image_nodes,
metadata={
"query_str": query_bundle.query_str,
"model_config": self._multi_modal_llm.metadata,
},
)
# async def asynthesize(
# self,
# query_bundle: QueryBundle,
# nodes: List[NodeWithScore],
# additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
# ) -> RESPONSE_TYPE:
# image_nodes, text_nodes = _get_image_and_text_nodes(nodes)
# context_str = "\n\n".join([r.get_content() for r in text_nodes])
# fmt_prompt = self._text_qa_template.format(
# context_str=context_str, query_str=query_bundle.query_str
# )
# llm_response = await self._multi_modal_llm.acomplete(
# prompt=fmt_prompt,
# image_documents=image_nodes,
# )
# return Response(
# response=str(llm_response),
# source_nodes=nodes,
# metadata={"text_nodes": text_nodes, "image_nodes": image_nodes},
# )
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
raise NotImplementedError("Async synthesize not implemented yet")
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
retrieval_results = self.retrieve(query_bundle)
retrieve_event.on_end(
payload={EventPayload.NODES: retrieval_results},
)
response = self.synthesize(
query_bundle,
retrieval_results=retrieval_results,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
raise NotImplementedError("Async query not implemented yet")
# async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# """Answer a query."""
# with self.callback_manager.event(
# CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
# ) as query_event:
# with self.callback_manager.event(
# CBEventType.RETRIEVE,
# payload={EventPayload.QUERY_STR: query_bundle.query_str},
# ) as retrieve_event:
# nodes = await self.aretrieve(query_bundle)
# retrieve_event.on_end(
# payload={EventPayload.NODES: nodes},
# )
# response = await self.asynthesize(
# query_bundle,
# nodes=nodes,
# )
# query_event.on_end(payload={EventPayload.RESPONSE: response})
# return response
@property
def retriever(self) -> MultiModalVectorIndexRetriever:
"""Get the retriever object."""
return self._retriever
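# --- Added sketch (editor's illustration, not from the original source) ---
# A small runnable illustration of the prompt templates defined above: formatting
# TEXT_QA_TEMPLATE with placeholder context and query strings, the same formatting step
# synthesize() performs before calling the multi-modal LLM.
if __name__ == "__main__":
    example_prompt = TEXT_QA_TEMPLATE.format(
        context_str="Source 1:\nAn example retrieved paragraph.",
        query_str="What does the example paragraph say?",
    )
    print(example_prompt)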
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.callbacks.base.CallbackManager"
] | [((1114, 1604), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (1128, 1604), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((1700, 2135), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""<image>\nCaption: {context_str}\n------\nYou are a smart agent who can answer questions based on external information. Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. If the image is not helpful, you should indicate that. \nQuery: {query_str}\nNote: Don\'t include expressions like "This image appears to be XXX" in your answer.\nAnswer: """'], {}), '(\n """<image>\nCaption: {context_str}\n------\nYou are a smart agent who can answer questions based on external information. Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. If the image is not helpful, you should indicate that. \nQuery: {query_str}\nNote: Don\'t include expressions like "This image appears to be XXX" in your answer.\nAnswer: """\n )\n', (1714, 2135), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2223, 2517), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['(\n \'With the following sources related to your question from my knowledge base: \\n\\n\'\n + \'-\' * 50 +\n """\nParagraphs:\n\n{context_str}\n\nImages:\n{image_context_str}\n\n""" + \'-\' *\n 50 +\n """\nHere is my answer:\n\n{text_context_response}\n{image_context_response}"""\n )'], {}), '(\n """With the following sources related to your question from my knowledge base: \n\n"""\n + \'-\' * 50 +\n """\nParagraphs:\n\n{context_str}\n\nImages:\n{image_context_str}\n\n""" + \'-\' *\n 50 +\n """\nHere is my answer:\n\n{text_context_response}\n{image_context_response}"""\n )\n', (2237, 2517), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((4563, 4582), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4578, 4582), False, 'from llama_index.core.callbacks.base import CallbackManager\n')] |
import asyncio
import os
import tempfile
import traceback
from datetime import datetime, date
from functools import partial
from pathlib import Path
import discord
import aiohttp
import openai
import tiktoken
from langchain.chat_models import ChatOpenAI
from llama_index import (
QuestionAnswerPrompt,
GPTVectorStoreIndex,
BeautifulSoupWebReader,
Document,
LLMPredictor,
OpenAIEmbedding,
SimpleDirectoryReader,
MockEmbedding,
ServiceContext,
get_response_synthesizer,
)
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.composability import QASummaryQueryEngineBuilder
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from langchain import OpenAI
from models.openai_model import Models
from services.environment_service import EnvService
MAX_SEARCH_PRICE = EnvService.get_max_search_price()
class Search:
def __init__(self, gpt_model, usage_service):
self.model = gpt_model
self.usage_service = usage_service
self.google_search_api_key = EnvService.get_google_search_api_key()
self.google_search_engine_id = EnvService.get_google_search_engine_id()
self.loop = asyncio.get_running_loop()
self.qaprompt = QuestionAnswerPrompt(
"You are formulating the response to a search query given the search prompt and the context. Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"
)
self.openai_key = os.getenv("OPENAI_TOKEN")
self.EMBED_CUTOFF = 2000
def add_search_index(self, index, user_id, query):
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}_search").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{query[:20]}"
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}_search"
/ f"{file}"
)
def build_search_started_embed(self):
embed = discord.Embed(
title="Searching the web...",
description="Refining google search query...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_refined_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n"
+ f"`{refined_query}`"
+ "\nRetrieving links from google...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_links_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nRetrieving webpages...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_determining_price_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nPre-determining index price...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_webpages_retrieved_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`" "\nIndexing...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_indexed_embed(self, refined_query):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nThinking about your question...",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def build_search_final_embed(self, refined_query, price):
embed = discord.Embed(
title="Searching the web...",
description="Refined query:\n" + f"`{refined_query}`"
"\nDone!\n||The total price was $" + price + "||",
color=discord.Color.blurple(),
)
embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png")
return embed
def index_webpage(self, url) -> list[Document]:
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
return documents
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
raise ValueError("Could not download PDF")
# Get the file path of this tempfile.NamedTemporaryFile
# Save this temp file to an actual file that we can put into something else to read it
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
for document in documents:
document.extra_info = {"URL": url}
# Delete the temporary file
return documents
async def get_links(self, query, search_scope=2):
"""Search the web for a query"""
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://www.googleapis.com/customsearch/v1?key={self.google_search_api_key}&cx={self.google_search_engine_id}&q={query}"
) as response:
if response.status == 200:
data = await response.json()
# Return a list of the top 2 links
return (
[item["link"] for item in data["items"][:search_scope]],
[item["link"] for item in data["items"]],
)
else:
raise ValueError(
"Error while retrieving links, the response returned "
+ str(response.status)
+ " with the message "
+ str(await response.text())
)
async def try_edit(self, message, embed):
try:
await message.edit(embed=embed)
except Exception:
traceback.print_exc()
pass
async def try_delete(self, message):
try:
await message.delete()
except Exception:
traceback.print_exc()
pass
async def search(
self,
ctx: discord.ApplicationContext,
query,
user_api_key,
search_scope,
nodes,
deep,
response_mode,
model,
multistep=False,
redo=None,
):
DEFAULT_SEARCH_NODES = 1
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
# Initialize the search cost
price = 0
if ctx:
in_progress_message = (
await ctx.respond(embed=self.build_search_started_embed())
if not redo
else await ctx.channel.send(embed=self.build_search_started_embed())
)
try:
llm_predictor_presearch = OpenAI(
max_tokens=50,
temperature=0.4,
presence_penalty=0.65,
model_name="text-davinci-003",
)
# Refine a query to send to google custom search API
prompt = f"You are to be given a search query for google. Change the query such that putting it into the Google Custom Search API will return the most relevant websites to assist in answering the original query. If the original query is inferring knowledge about the current day, insert the current day into the refined prompt. If the original query is inferring knowledge about the current month, insert the current month and year into the refined prompt. If the original query is inferring knowledge about the current year, insert the current year into the refined prompt. Generally, if the original query is inferring knowledge about something that happened recently, insert the current month into the refined query. Avoid inserting a day, month, or year for queries that purely ask about facts and about things that don't have much time-relevance. The current date is {str(datetime.now().date())}. Do not insert the current date if not neccessary. Respond with only the refined query for the original query. Don’t use punctuation or quotation marks.\n\nExamples:\n---\nOriginal Query: ‘Who is Harald Baldr?’\nRefined Query: ‘Harald Baldr biography’\n---\nOriginal Query: ‘What happened today with the Ohio train derailment?’\nRefined Query: ‘Ohio train derailment details {str(datetime.now().date())}’\n---\nOriginal Query: ‘Is copper in drinking water bad for you?’\nRefined Query: ‘copper in drinking water adverse effects’\n---\nOriginal Query: What's the current time in Mississauga?\nRefined Query: current time Mississauga\nNow, refine the user input query.\nOriginal Query: {query}\nRefined Query:"
query_refined = await llm_predictor_presearch.agenerate(
prompts=[prompt],
)
query_refined_text = query_refined.generations[0][0].text
await self.usage_service.update_usage(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
price += await self.usage_service.get_price(
query_refined.llm_output.get("token_usage").get("total_tokens"),
"davinci",
)
except Exception as e:
traceback.print_exc()
query_refined_text = query
if ctx:
await self.try_edit(
in_progress_message, self.build_search_refined_embed(query_refined_text)
)
# Get the links for the query
links, all_links = await self.get_links(
query_refined_text, search_scope=search_scope
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_links_retrieved_embed(query_refined_text),
)
if all_links is None:
raise ValueError("The Google Search API returned an error.")
# For each link, crawl the page and get all the text that's not HTML garbage.
# Concatenate all the text for a given website into one string and save it into an array:
documents = []
for link in links:
# First, attempt a connection with a timeout of 3 seconds to the link, if the timeout occurs, don't
# continue to the document loading.
pdf = False
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=1) as response:
# Add another entry to links from all_links if the link is not already in it to compensate for the failed request
if response.status not in [200, 203, 202, 204]:
for link2 in all_links:
if link2 not in links:
links.append(link2)
break
continue
# Follow redirects
elif response.status in [301, 302, 303, 307, 308]:
try:
links.append(response.url)
continue
except:
continue
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
pdf = True
except:
try:
# Try to add a link from all_links, this is kind of messy.
for link2 in all_links:
if link2 not in links:
links.append(link2)
break
except:
pass
continue
try:
if not pdf:
document = await self.loop.run_in_executor(
None, partial(self.index_webpage, link)
)
else:
document = await self.index_pdf(link)
[documents.append(doc) for doc in document]
except Exception as e:
traceback.print_exc()
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_webpages_retrieved_embed(query_refined_text),
)
embedding_model = OpenAIEmbedding()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embedding_model,
callback_manager=callback_manager,
)
# Check price
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
)
callback_manager_mock = CallbackManager([token_counter_mock])
embed_model_mock = MockEmbedding(embed_dim=1536)
service_context_mock = ServiceContext.from_defaults(
embed_model=embed_model_mock, callback_manager=callback_manager_mock
)
        # Await the mock indexing pass so the embedding token count below is accurate
        await self.loop.run_in_executor(
            None,
            partial(
                GPTVectorStoreIndex.from_documents,
                documents,
                service_context=service_context_mock,
            ),
        )
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
        if total_usage_price > MAX_SEARCH_PRICE:
            raise ValueError(
                "Doing this search would be prohibitively expensive. Please try a narrower search scope."
            )
if not deep:
index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents,
service_context=service_context,
use_async=True,
),
)
# save the index to disk if not a redo
if not redo:
self.add_search_index(
index,
ctx.user.id
if isinstance(ctx, discord.ApplicationContext)
else ctx.author.id,
query,
)
else:
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_determining_price_embed(query_refined_text),
)
graph_builder = QASummaryQueryEngineBuilder(service_context=service_context)
index = await self.loop.run_in_executor(
None,
partial(
graph_builder.build_from_documents,
documents,
),
)
if ctx:
await self.try_edit(
in_progress_message, self.build_search_indexed_embed(query_refined_text)
)
########################################
if not deep:
step_decompose_transform = StepDecomposeQueryTransform(
service_context.llm_predictor
)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=nodes or DEFAULT_SEARCH_NODES,
)
response_synthesizer = get_response_synthesizer(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
text_qa_template=self.qaprompt,
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=step_decompose_transform,
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = await self.loop.run_in_executor(
None,
partial(multistep_query_engine.query, query),
)
else:
response = await self.loop.run_in_executor(
None,
partial(query_engine.query, query),
)
else:
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {"similarity_top_k": 1},
},
{
"index_struct_type": "list",
"query_mode": "default",
"query_kwargs": {
"response_mode": "tree_summarize",
"use_async": True,
"verbose": True,
},
},
{
"index_struct_type": "tree",
"query_mode": "default",
"query_kwargs": {
"verbose": True,
"use_async": True,
"child_branch_factor": 2,
},
},
]
response = await self.loop.run_in_executor(
None,
partial(
index.query,
query,
),
)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
price += await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
) + await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
if ctx:
await self.try_edit(
in_progress_message,
self.build_search_final_embed(query_refined_text, str(round(price, 6))),
)
return response, query_refined_text
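# --- Added sketch (editor's illustration, not from the original source) ---
# A hedged example of driving Search.search programmatically outside of Discord. The
# gpt_model and usage_service arguments are application-specific objects that are
# assumed to already exist; passing ctx=None skips the Discord progress embeds.
async def run_search_example(gpt_model, usage_service):
    searcher = Search(gpt_model, usage_service)
    response, refined_query = await searcher.search(
        ctx=None,
        query="latest llama_index release notes",  # placeholder query
        user_api_key=None,
        search_scope=2,
        nodes=1,
        deep=False,
        response_mode="compact",
        model="gpt-3.5-turbo",
        multistep=False,
    )
    print(refined_query)
    print(response)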
| [
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.query_engine.MultiStepQueryEngine",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.MockEmbedding",
"llama_index.BeautifulSoupWebReader",
"llama_index.QuestionAnswerPrompt",
"llama_index.composability.QASummaryQueryEngineBuilder",
"llama_index.callbacks.CallbackManager",
"llama_index.indices.query.query_transform.StepDecomposeQueryTransform",
"llama_index.query_engine.RetrieverQueryEngine"
] | [((1135, 1168), 'services.environment_service.EnvService.get_max_search_price', 'EnvService.get_max_search_price', ([], {}), '()\n', (1166, 1168), False, 'from services.environment_service import EnvService\n'), ((1346, 1384), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_search_api_key', ([], {}), '()\n', (1382, 1384), False, 'from services.environment_service import EnvService\n'), ((1424, 1464), 'services.environment_service.EnvService.get_google_search_engine_id', 'EnvService.get_google_search_engine_id', ([], {}), '()\n', (1462, 1464), False, 'from services.environment_service import EnvService\n'), ((1485, 1511), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1509, 1511), False, 'import asyncio\n'), ((1536, 2191), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['"""You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"""'], {}), '(\n """You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. 
The search query was: {query_str}\n"""\n )\n', (1556, 2191), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((2322, 2347), 'os.getenv', 'os.getenv', (['"""OPENAI_TOKEN"""'], {}), "('OPENAI_TOKEN')\n", (2331, 2347), False, 'import os\n'), ((14651, 14668), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (14666, 14668), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((14920, 14952), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (14935, 14952), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((14980, 15106), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_model', 'callback_manager': 'callback_manager'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_model, callback_manager=callback_manager)\n', (15008, 15106), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((15344, 15381), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter_mock]'], {}), '([token_counter_mock])\n', (15359, 15381), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((15409, 15438), 'llama_index.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (15422, 15438), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((15470, 15573), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model_mock', 'callback_manager': 'callback_manager_mock'}), '(embed_model=embed_model_mock, callback_manager\n =callback_manager_mock)\n', (15498, 15573), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((5959, 5982), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5980, 5982), False, 'import aiohttp\n'), ((6864, 6887), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6885, 6887), False, 'import aiohttp\n'), ((8966, 9063), 'langchain.OpenAI', 'OpenAI', ([], {'max_tokens': '(50)', 'temperature': '(0.4)', 'presence_penalty': '(0.65)', 'model_name': '"""text-davinci-003"""'}), "(max_tokens=50, temperature=0.4, presence_penalty=0.65, model_name=\n 'text-davinci-003')\n", (8972, 9063), False, 'from langchain import OpenAI\n'), ((15656, 15753), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context_mock'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context_mock)\n', (15663, 15753), False, 'from functools import partial\n'), ((17056, 17116), 
'llama_index.composability.QASummaryQueryEngineBuilder', 'QASummaryQueryEngineBuilder', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (17083, 17116), False, 'from llama_index.composability import QASummaryQueryEngineBuilder\n'), ((17602, 17660), 'llama_index.indices.query.query_transform.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', (['service_context.llm_predictor'], {}), '(service_context.llm_predictor)\n', (17629, 17660), False, 'from llama_index.indices.query.query_transform import StepDecomposeQueryTransform\n'), ((17716, 17801), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(nodes or DEFAULT_SEARCH_NODES)'}), '(index=index, similarity_top_k=nodes or\n DEFAULT_SEARCH_NODES)\n', (17736, 17801), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((17881, 18059), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'response_mode', 'use_async': '(True)', 'refine_template': 'CHAT_REFINE_PROMPT', 'text_qa_template': 'self.qaprompt', 'service_context': 'service_context'}), '(response_mode=response_mode, use_async=True,\n refine_template=CHAT_REFINE_PROMPT, text_qa_template=self.qaprompt,\n service_context=service_context)\n', (17905, 18059), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((18175, 18264), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (18195, 18264), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine\n'), ((18327, 18554), 'llama_index.query_engine.MultiStepQueryEngine', 'MultiStepQueryEngine', ([], {'query_engine': 'query_engine', 'query_transform': 'step_decompose_transform', 'index_summary': '"""Provides information about everything you need to know about this topic, use this to answer the question."""'}), "(query_engine=query_engine, query_transform=\n step_decompose_transform, index_summary=\n 'Provides information about everything you need to know about this topic, use this to answer the question.'\n )\n", (18347, 18554), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine\n'), ((3141, 3164), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3162, 3164), False, 'import discord\n'), ((3542, 3565), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3563, 3565), False, 'import discord\n'), ((3928, 3951), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3949, 3951), False, 'import discord\n'), ((4325, 4348), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4346, 4348), False, 'import discord\n'), ((4692, 4715), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4713, 4715), False, 'import discord\n'), ((5080, 5103), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5101, 5103), False, 'import discord\n'), ((5487, 5510), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5508, 5510), False, 'import discord\n'), ((5684, 5751), 'llama_index.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), 
'(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (5706, 5751), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((6549, 6592), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (6570, 6592), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((7897, 7918), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7916, 7918), False, 'import traceback\n'), ((8064, 8085), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8083, 8085), False, 'import traceback\n'), ((11410, 11431), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11429, 11431), False, 'import traceback\n'), ((14711, 14754), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'model'}), '(temperature=0, model_name=model)\n', (14721, 14754), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2711, 2723), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2721, 2723), False, 'from datetime import datetime, date\n'), ((2732, 2744), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2742, 2744), False, 'from datetime import datetime, date\n'), ((6164, 6220), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (6191, 6220), False, 'import tempfile\n'), ((12525, 12548), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (12546, 12548), False, 'import aiohttp\n'), ((14421, 14442), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14440, 14442), False, 'import traceback\n'), ((14825, 14859), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (14852, 14859), False, 'import tiktoken\n'), ((15245, 15279), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (15272, 15279), False, 'import tiktoken\n'), ((16269, 16377), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context, use_async=True)\n', (16276, 16377), False, 'from functools import partial\n'), ((17209, 17263), 'functools.partial', 'partial', (['graph_builder.build_from_documents', 'documents'], {}), '(graph_builder.build_from_documents, documents)\n', (17216, 17263), False, 'from functools import partial\n'), ((19989, 20016), 'functools.partial', 'partial', (['index.query', 'query'], {}), '(index.query, query)\n', (19996, 20016), False, 'from functools import partial\n'), ((18735, 18779), 'functools.partial', 'partial', (['multistep_query_engine.query', 'query'], {}), '(multistep_query_engine.query, query)\n', (18742, 18779), False, 'from functools import partial\n'), ((18923, 18957), 'functools.partial', 'partial', (['query_engine.query', 'query'], {}), '(query_engine.query, query)\n', (18930, 18957), False, 'from functools import partial\n'), ((2534, 2556), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2554, 2556), False, 'from 
services.environment_service import EnvService\n'), ((2828, 2850), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2848, 2850), False, 'from services.environment_service import EnvService\n'), ((14174, 14207), 'functools.partial', 'partial', (['self.index_webpage', 'link'], {}), '(self.index_webpage, link)\n', (14181, 14207), False, 'from functools import partial\n'), ((10093, 10107), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10105, 10107), False, 'from datetime import datetime, date\n'), ((10513, 10527), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10525, 10527), False, 'from datetime import datetime, date\n')] |
from typing import Callable, List
from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage
import os
def create_vector(service_context, vector_storage_dir: str, doc_loader: Callable[[], List[Document]]) -> VectorStoreIndex:
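    """Build a vector index over the loaded documents, or reload a previously persisted one from vector_storage_dir."""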
if not os.path.exists(vector_storage_dir):
documents = doc_loader()
print(f"About to build vector-index over {len(documents)} document(s) ...")
vector_index = VectorStoreIndex.from_documents(
documents,
service_context=service_context
)
print(f"Storing vector-index to {vector_storage_dir} ...")
vector_index.storage_context.persist(persist_dir=vector_storage_dir)
else:
print(f"Loading vector-index from storage from {vector_storage_dir} ...")
storage_context_vector = StorageContext.from_defaults(persist_dir=vector_storage_dir)
vector_index = load_index_from_storage(
service_context=service_context,
storage_context=storage_context_vector
)
return vector_index
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((240, 274), 'os.path.exists', 'os.path.exists', (['vector_storage_dir'], {}), '(vector_storage_dir)\n', (254, 274), False, 'import os\n'), ((416, 491), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (447, 491), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((795, 855), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vector_storage_dir'}), '(persist_dir=vector_storage_dir)\n', (823, 855), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((879, 980), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'service_context', 'storage_context': 'storage_context_vector'}), '(service_context=service_context, storage_context=\n storage_context_vector)\n', (902, 980), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n')] |
import streamlit as st
import torch
from glob import glob
from pathlib import Path
from llama_index.prompts.prompts import SimpleInputPrompt
from llama_index import (
set_global_service_context,
ServiceContext,
VectorStoreIndex,
download_loader,
)
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
from transformers import AutoTokenizer, AutoModelForCausalLM
from llama_index.llms import HuggingFaceLLM
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader()
model_name = "meta-llama/Llama-2-7b-chat-hf"
auth_token = "*******************************"
system_prompt = """<s>[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe.
Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct.
If you don't know the answer to a question, please don't share false information. <</SYS>>
""" # Llama2's official system prompt
@st.cache_resource
def model_tokenizer_embedder(model_name, auth_token):
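    """Load the Llama-2 tokenizer, the 8-bit quantized model and a MiniLM embedding model; cached across Streamlit reruns."""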
tokenizer = AutoTokenizer.from_pretrained(
model_name, cache_dir="./model/", use_auth_token=auth_token
)
model = AutoModelForCausalLM.from_pretrained(
model_name,
cache_dir="./model/",
use_auth_token=auth_token,
torch_dtype=torch.float16,
load_in_8bit=True,
)
embedding_llm = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
)
return tokenizer, model, embedding_llm
def load_documents(directory):
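    """Load every PDF in the given directory with PyMuPDF and return the combined list of documents."""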
documents = []
for item_path in glob(directory + "*.pdf"):
# documents.extend(loader.load(file_path=Path(item_path), metadata=True))
documents.extend(loader.load(file_path=item_path, metadata=True))
return documents
tokenizer, model, embedding_llm = model_tokenizer_embedder(model_name, auth_token)
query_wrapper_prompt = SimpleInputPrompt("{query_str} [/INST]")
llm = HuggingFaceLLM(
context_window=4096,
max_new_tokens=256,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
model=model,
tokenizer=tokenizer,
)
service_context = ServiceContext.from_defaults(
chunk_size=1024, llm=llm, embed_model=embedding_llm
)
set_global_service_context(service_context)
documents = load_documents("./documents/")
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
st.title("Llama2 RAG")
prompt = st.text_input("Enter your prompt")
if prompt:
response = query_engine.query(prompt)
st.write(response.response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.HuggingFaceLLM",
"llama_index.set_global_service_context",
"llama_index.prompts.prompts.SimpleInputPrompt"
] | [((495, 527), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (510, 527), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2176, 2216), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""{query_str} [/INST]"""'], {}), "('{query_str} [/INST]')\n", (2193, 2216), False, 'from llama_index.prompts.prompts import SimpleInputPrompt\n'), ((2224, 2394), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': '(4096)', 'max_new_tokens': '(256)', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'model': 'model', 'tokenizer': 'tokenizer'}), '(context_window=4096, max_new_tokens=256, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, model=model,\n tokenizer=tokenizer)\n', (2238, 2394), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((2432, 2518), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(1024)', 'llm': 'llm', 'embed_model': 'embedding_llm'}), '(chunk_size=1024, llm=llm, embed_model=\n embedding_llm)\n', (2460, 2518), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2520, 2563), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2546, 2563), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2616, 2658), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2647, 2658), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2699, 2721), 'streamlit.title', 'st.title', (['"""Llama2 RAG"""'], {}), "('Llama2 RAG')\n", (2707, 2721), True, 'import streamlit as st\n'), ((2732, 2766), 'streamlit.text_input', 'st.text_input', (['"""Enter your prompt"""'], {}), "('Enter your prompt')\n", (2745, 2766), True, 'import streamlit as st\n'), ((1330, 1424), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'use_auth_token': 'auth_token'}), "(model_name, cache_dir='./model/',\n use_auth_token=auth_token)\n", (1359, 1424), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM\n'), ((1447, 1594), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'use_auth_token': 'auth_token', 'torch_dtype': 'torch.float16', 'load_in_8bit': '(True)'}), "(model_name, cache_dir='./model/',\n use_auth_token=auth_token, torch_dtype=torch.float16, load_in_8bit=True)\n", (1483, 1594), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM\n'), ((1863, 1888), 'glob.glob', 'glob', (["(directory + '*.pdf')"], {}), "(directory + '*.pdf')\n", (1867, 1888), False, 'from glob import glob\n'), ((2824, 2851), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2832, 2851), True, 'import streamlit as st\n'), ((1687, 1739), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1708, 1739), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n')] |
from langchain.agents import load_tools, Tool, tool
from langchain.agents import initialize_agent
from langchain.llms import OpenAI, OpenAIChat
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.chains import RetrievalQA
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document
from llama_index.readers.qdrant import QdrantReader
from llama_index.optimization.optimizer import SentenceEmbeddingOptimizer
from gptcache import cache
from gptcache.adapter import openai
import pinecone, warnings, yaml, os
warnings.filterwarnings("ignore")
class KapwingVectorStore:
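    """Load the query instructions, split them into chunks and expose several retrieval backends (Qdrant, FAISS, Chroma, Pinecone, GPT Index) over them."""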
def __init__(self, chunk_size=10000,
model_name="gpt-4",
temperature=0,
filepath='data/query_instructions.txt'):
cache.init()
cache.set_openai_key()
with open("config/config.yaml", 'r') as stream:
config = yaml.safe_load(stream)
os.environ["OPENAI_API_KEY"] = config['OPENAI_API_KEY']
self.qdrant_host = config['QDRANT_HOST']
self.qdrant_api_key = config['QDRANT_API_KEY']
self.pcone_api_key = config['PINECONE_API_KEY']
self.pcone_env = config['PINECONE_HOST']
self.text_splitter = CharacterTextSplitter(separator="\n\n\n", chunk_size=chunk_size, chunk_overlap=0)
self.loader = TextLoader(filepath)
self.docs = self.text_splitter.split_documents(self.loader.load())
self.embeddings = OpenAIEmbeddings()
self.llm_ = OpenAIChat(model_name=model_name, temperature=temperature)
with open("config/prompts.yaml", 'r') as stream:
prompts = yaml.safe_load(stream)
self.prefix = prompts['vectorstore_prefix']
self.suffix = prompts['vectorstore_suffix']
self.simple_scripts_prompt = prompts['simple_scripts_prompt']
self.mask_prompt = prompts['mask_prompt']
self.fast_func_prompt = prompts['fast_func_prompt']
def get_qdrant(self):
self.qdrant_tool_db = Qdrant.from_documents(self.docs, self.embeddings, host=self.qdrant_host, prefer_grpc=True, api_key=self.qdrant_api_key).as_retriever()
self.qdrant_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.qdrant_tool_db)
return self.qdrant_tool_db, self.qdrant_tool_vec
def get_faiss(self):
self.faiss_tool_db = FAISS.from_documents(self.docs, self.embeddings).as_retriever()
self.faiss_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.faiss_tool_db)
return self.faiss_tool_db, self.faiss_tool_vec
def get_chroma(self):
self.chroma_tool_db = Chroma.from_documents(self.docs, self.embeddings, collection_name="tools").as_retriever()
self.chroma_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.chroma_tool_db)
return self.chroma_tool_db, self.chroma_tool_vec
def get_pcone(self):
pinecone.init(api_key=self.pcone_api_key, environment=self.pcone_env)
self.pcone_tool_db = Pinecone.from_documents(self.docs, self.embeddings, index_name="tool-db").as_retriever()
self.pcone_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.pcone_tool_db)
return self.pcone_tool_db, self.pcone_tool_vec
def set_gpt_index(self):
self.gpt_docs = [Document(doc.page_content) for doc in self.docs]
self.tool_index = GPTSimpleVectorIndex.from_documents(self.gpt_docs)
def gpt_index_query(self, query):
res = self.tool_index.query(self.prefix.format(query=query) + self.mask_prompt + self.suffix,
similarity_top_k=3
# optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3)
)
return res
def gpt_index_funcs(self, query):
res = self.tool_index.query(self.fast_func_prompt.format(query=query),
similarity_top_k=3
# optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3)
)
return res
def gpt_index_scripts_query(self, query):
res = self.tool_index.query(self.simple_scripts_prompt.format(query=query) + self.mask_prompt,
# similarity_top_k=3,
# optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3)
)
return res
def qdrant_query(self, query):
res = self.qdrant_tool_vec.run(self.prefix.format(query=query) + self.suffix)
return res
def pcone_query(self, query):
res = self.pcone_tool_vec.run(self.prefix.format(query=query) + self.suffix)
return res
def faiss_query(self, query):
res = self.faiss_tool_vec.run(self.prefix.format(query=query) + self.suffix)
return res
def faiss_scripts_query(self, query):
res = self.faiss_tool_vec.run(self.simple_scripts_prompt.format(query=query) + self.mask_prompt)
return res
def main():
query = input("QUERY: ")
vec = KapwingVectorStore()
vec.get_faiss()
res = vec.faiss_query(query)
print(res)
if __name__ == "__main__":
main()
| [
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.Document"
] | [((721, 754), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (744, 754), False, 'import pinecone, warnings, yaml, os\n'), ((978, 990), 'gptcache.cache.init', 'cache.init', ([], {}), '()\n', (988, 990), False, 'from gptcache import cache\n'), ((999, 1021), 'gptcache.cache.set_openai_key', 'cache.set_openai_key', ([], {}), '()\n', (1019, 1021), False, 'from gptcache import cache\n'), ((1435, 1520), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n\n\n"""', 'chunk_size': 'chunk_size', 'chunk_overlap': '(0)'}), "(separator='\\n\\n\\n', chunk_size=chunk_size,\n chunk_overlap=0)\n", (1456, 1520), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1539, 1559), 'langchain.document_loaders.TextLoader', 'TextLoader', (['filepath'], {}), '(filepath)\n', (1549, 1559), False, 'from langchain.document_loaders import TextLoader\n'), ((1662, 1680), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1678, 1680), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1701, 1759), 'langchain.llms.OpenAIChat', 'OpenAIChat', ([], {'model_name': 'model_name', 'temperature': 'temperature'}), '(model_name=model_name, temperature=temperature)\n', (1711, 1759), False, 'from langchain.llms import OpenAI, OpenAIChat\n'), ((2379, 2445), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.qdrant_tool_db'}), '(llm=self.llm_, retriever=self.qdrant_tool_db)\n', (2399, 2445), False, 'from langchain.chains import RetrievalQA\n'), ((2652, 2717), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.faiss_tool_db'}), '(llm=self.llm_, retriever=self.faiss_tool_db)\n', (2672, 2717), False, 'from langchain.chains import RetrievalQA\n'), ((2951, 3017), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.chroma_tool_db'}), '(llm=self.llm_, retriever=self.chroma_tool_db)\n', (2971, 3017), False, 'from langchain.chains import RetrievalQA\n'), ((3109, 3178), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'self.pcone_api_key', 'environment': 'self.pcone_env'}), '(api_key=self.pcone_api_key, environment=self.pcone_env)\n', (3122, 3178), False, 'import pinecone, warnings, yaml, os\n'), ((3327, 3392), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.pcone_tool_db'}), '(llm=self.llm_, retriever=self.pcone_tool_db)\n', (3347, 3392), False, 'from langchain.chains import RetrievalQA\n'), ((3578, 3628), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['self.gpt_docs'], {}), '(self.gpt_docs)\n', (3613, 3628), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document\n'), ((1108, 1130), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1122, 1130), False, 'import pinecone, warnings, yaml, os\n'), ((1848, 1870), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1862, 1870), False, 'import pinecone, warnings, yaml, os\n'), ((3503, 3529), 'llama_index.Document', 'Document', (['doc.page_content'], {}), '(doc.page_content)\n', (3511, 3529), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document\n'), ((2213, 2336), 'langchain.vectorstores.Qdrant.from_documents', 'Qdrant.from_documents', (['self.docs', 
'self.embeddings'], {'host': 'self.qdrant_host', 'prefer_grpc': '(True)', 'api_key': 'self.qdrant_api_key'}), '(self.docs, self.embeddings, host=self.qdrant_host,\n prefer_grpc=True, api_key=self.qdrant_api_key)\n', (2234, 2336), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((2558, 2606), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['self.docs', 'self.embeddings'], {}), '(self.docs, self.embeddings)\n', (2578, 2606), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((2830, 2904), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['self.docs', 'self.embeddings'], {'collection_name': '"""tools"""'}), "(self.docs, self.embeddings, collection_name='tools')\n", (2851, 2904), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((3208, 3281), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['self.docs', 'self.embeddings'], {'index_name': '"""tool-db"""'}), "(self.docs, self.embeddings, index_name='tool-db')\n", (3231, 3281), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n')] |
import os, config, openai
from llama_index import StorageContext, load_index_from_storage
openai.api_key = config.OPENAI_API_KEY
os.environ['OPENAI_API_KEY'] = config.OPENAI_API_KEY
# new version of llama index uses StorageContext instead of load_from_disk
# index = GPTSimpleVectorIndex.load_from_disk('index_news.json')
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
# new version of llama index uses query_engine.query()
query_engine = index.as_query_engine()
response = query_engine.query("What are some near-term risks to Nvidia?")
print(response) | [
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((342, 395), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (370, 395), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((404, 444), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (427, 444), False, 'from llama_index import StorageContext, load_index_from_storage\n')] |
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np
import nest_asyncio
nest_asyncio.apply()
def get_openai_api_key():
_ = load_dotenv(find_dotenv())
return os.getenv("OPENAI_API_KEY")
from trulens_eval import (
Feedback,
TruLlama,
OpenAI
)
from trulens_eval.feedback import Groundedness
def get_prebuilt_trulens_recorder(query_engine, app_id):
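    """Build a TruLlama recorder that scores the query engine on answer relevance, context relevance and groundedness."""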
openai = OpenAI()
qa_relevance = (
Feedback(openai.relevance_with_cot_reasons, name="Answer Relevance")
.on_input_output()
)
qs_relevance = (
Feedback(openai.relevance_with_cot_reasons, name = "Context Relevance")
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
# grounded = Groundedness(groundedness_provider=openai, summarize_provider=openai)
grounded = Groundedness(groundedness_provider=openai)
groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(TruLlama.select_source_nodes().node.text)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
feedbacks = [qa_relevance, qs_relevance, groundedness]
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=feedbacks
)
return tru_recorder
from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
import os
def build_sentence_window_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
sentence_window_size=3,
save_dir="sentence_index",
):
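    """Build a sentence-window index over the documents, or reload it from save_dir if already persisted."""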
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=sentence_window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
documents, service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
def get_sentence_window_query_engine(
sentence_index,
similarity_top_k=6,
rerank_top_n=2,
):
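    """Create a query engine that swaps each retrieved sentence for its surrounding window and reranks the results."""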
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import StorageContext
from llama_index.retrievers import AutoMergingRetriever
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine import RetrieverQueryEngine
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None,
):
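    """Build a hierarchical index whose leaf nodes can be auto-merged at query time, or reload it from save_dir."""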
chunk_sizes = chunk_sizes or [2048, 512, 128]
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
nodes = node_parser.get_nodes_from_documents(documents)
leaf_nodes = get_leaf_nodes(nodes)
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context,
)
return automerging_index
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=6,
):
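    """Combine an auto-merging retriever with a sentence-transformer reranker into a query engine."""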
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
auto_merging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return auto_merging_engine
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((96, 116), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (114, 116), False, 'import nest_asyncio\n'), ((192, 219), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (201, 219), False, 'import os\n'), ((408, 416), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (414, 416), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((862, 904), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'openai'}), '(groundedness_provider=openai)\n', (874, 904), False, 'from trulens_eval.feedback import Groundedness\n'), ((1245, 1303), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1253, 1303), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1954, 2104), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1992, 2104), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((2155, 2247), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (2183, 2247), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((2861, 2923), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2893, 2923), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor\n'), ((2937, 3014), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2962, 3014), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((3777, 3838), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3813, 3838), False, 'from llama_index.node_parser import HierarchicalNodeParser\n'), ((3916, 3937), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3930, 3937), False, 'from llama_index.node_parser import get_leaf_nodes\n'), ((3960, 4022), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3988, 4022), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4068, 4098), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4096, 4098), False, 'from llama_index import StorageContext\n'), ((4825, 4914), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4845, 4914), False, 'from llama_index.retrievers import 
AutoMergingRetriever\n'), ((4938, 5015), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (4963, 5015), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((5056, 5127), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, node_postprocessors=[rerank])\n', (5086, 5127), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((165, 178), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (176, 178), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((2285, 2309), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2299, 2309), False, 'import os\n'), ((2336, 2412), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (2367, 2412), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4161, 4185), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4175, 4185), False, 'import os\n'), ((4215, 4313), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4231, 4313), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((447, 515), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Answer Relevance')\n", (455, 515), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2576, 2626), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2604, 2626), False, 'from llama_index import StorageContext\n'), ((4479, 4529), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4507, 4529), False, 'from llama_index import StorageContext\n'), ((683, 713), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (711, 713), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((579, 648), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Context Relevance')\n", (587, 648), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((935, 1012), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (943, 1012), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1029, 1059), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1057, 1059), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n')] |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.llms import Perplexity
from llama_index import SimpleDirectoryReader
@st.cache_resource(show_spinner=True)
def load_data():
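    """Index the documents in chatbot/data once and cache the result across Streamlit reruns."""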
with st.spinner(text="Die LSB-Informationen werden indiziert. Das dauert nur ein paar Augenblicke."):
reader = SimpleDirectoryReader(input_dir="chatbot/data", recursive=True)
docs = reader.load_data()
service_context = ServiceContext.from_defaults(llm=llm)
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
pplx_api_key = st.secrets.pplx_key
llm = Perplexity(
api_key=pplx_api_key, model="pplx-70b-chat", temperature=0.4, system_prompt="Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch."
)
st.header("Der LSB-Service-Chat 💬 📚")
if "messages" not in st.session_state.keys(): # Initialize the chat message history
st.session_state.messages = [
{"role": "assistant", "content": "Was möchten Sie über die Leipziger Städtischen Bibliotheken wissen?"}
]
index = load_data()
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Ihre Frage"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Ich denke nach ..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Perplexity"
] | [((168, 204), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(True)'}), '(show_spinner=True)\n', (185, 204), True, 'import streamlit as st\n'), ((657, 1257), 'llama_index.llms.Perplexity', 'Perplexity', ([], {'api_key': 'pplx_api_key', 'model': '"""pplx-70b-chat"""', 'temperature': '(0.4)', 'system_prompt': '"""Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch."""'}), "(api_key=pplx_api_key, model='pplx-70b-chat', temperature=0.4,\n system_prompt=\n 'Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch.'\n )\n", (667, 1257), False, 'from llama_index.llms import Perplexity\n'), ((1251, 1288), 'streamlit.header', 'st.header', (['"""Der LSB-Service-Chat 💬 📚"""'], {}), "('Der LSB-Service-Chat 💬 📚')\n", (1260, 1288), True, 'import streamlit as st\n'), ((1311, 1334), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1332, 1334), True, 'import streamlit as st\n'), ((1643, 1670), 'streamlit.chat_input', 'st.chat_input', (['"""Ihre Frage"""'], {}), "('Ihre Frage')\n", (1656, 1670), True, 'import streamlit as st\n'), ((1725, 1794), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1757, 1794), True, 'import streamlit as st\n'), ((231, 336), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Die LSB-Informationen werden indiziert. Das dauert nur ein paar Augenblicke."""'}), "(text=\n 'Die LSB-Informationen werden indiziert. 
Das dauert nur ein paar Augenblicke.'\n )\n", (241, 336), True, 'import streamlit as st\n'), ((345, 408), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""chatbot/data"""', 'recursive': '(True)'}), "(input_dir='chatbot/data', recursive=True)\n", (366, 408), False, 'from llama_index import SimpleDirectoryReader\n'), ((469, 506), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (497, 506), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((523, 593), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (554, 593), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((1881, 1913), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1896, 1913), True, 'import streamlit as st\n'), ((1923, 1951), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1931, 1951), True, 'import streamlit as st\n'), ((2084, 2112), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2099, 2112), True, 'import streamlit as st\n'), ((2127, 2159), 'streamlit.spinner', 'st.spinner', (['"""Ich denke nach ..."""'], {}), "('Ich denke nach ...')\n", (2137, 2159), True, 'import streamlit as st\n'), ((2221, 2248), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2229, 2248), True, 'import streamlit as st\n'), ((2335, 2376), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2367, 2376), True, 'import streamlit as st\n')] |
import logging
import os
import sys
from shutil import rmtree
import openai
from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex
from llama_index.llms.openai import OpenAI
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
openai.api_key = os.environ['OPENAI_API_KEY']
service_context = ServiceContext.from_defaults(llm=OpenAI())
def build_index(data_dir: str, knowledge_base_dir: str) -> None:
"""Build the vector index from the markdown files in the directory."""
print("Building vector index...")
documents = SimpleDirectoryReader(data_dir).load_data()
index = TreeIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir=knowledge_base_dir)
print("Done.")
def main() -> None:
"""Build the vector index from the markdown files in the directory."""
base_dir = os.path.dirname(os.path.abspath(__file__))
knowledge_base_dir = os.path.join(base_dir, "kb")
# Delete Storage Directory
if os.path.exists(knowledge_base_dir):
rmtree(knowledge_base_dir)
data_dir = os.path.join(base_dir, "content", "blogs")
build_index(data_dir, knowledge_base_dir)
if __name__ == "__main__":
main()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.TreeIndex.from_documents",
"llama_index.llms.openai.OpenAI"
] | [((194, 252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (213, 252), False, 'import logging\n'), ((284, 324), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (305, 324), False, 'import logging\n'), ((688, 756), 'llama_index.TreeIndex.from_documents', 'TreeIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (712, 756), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex\n'), ((1026, 1054), 'os.path.join', 'os.path.join', (['base_dir', '"""kb"""'], {}), "(base_dir, 'kb')\n", (1038, 1054), False, 'import os\n'), ((1093, 1127), 'os.path.exists', 'os.path.exists', (['knowledge_base_dir'], {}), '(knowledge_base_dir)\n', (1107, 1127), False, 'import os\n'), ((1179, 1221), 'os.path.join', 'os.path.join', (['base_dir', '"""content"""', '"""blogs"""'], {}), "(base_dir, 'content', 'blogs')\n", (1191, 1221), False, 'import os\n'), ((253, 272), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (270, 272), False, 'import logging\n'), ((425, 433), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (431, 433), False, 'from llama_index.llms.openai import OpenAI\n'), ((974, 999), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (989, 999), False, 'import os\n'), ((1137, 1163), 'shutil.rmtree', 'rmtree', (['knowledge_base_dir'], {}), '(knowledge_base_dir)\n', (1143, 1163), False, 'from shutil import rmtree\n'), ((631, 662), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['data_dir'], {}), '(data_dir)\n', (652, 662), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, TreeIndex\n')] |
import logging
import sys
from dotenv import load_dotenv
from llama_index.core import VectorStoreIndex
from llama_index.readers.web import SimpleWebPageReader
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def load_environment_variables():
"""
Load environment variables from the .env file.
"""
load_dotenv()
def load_web_data(url):
"""
Load data from a web page using SimpleWebPageReader.
:param url: The URL of the web page to load.
:return: A list of loaded documents.
"""
return SimpleWebPageReader(html_to_text=True).load_data(urls=[url])
def create_vector_store_index(documents):
"""
Create a VectorStoreIndex from the loaded documents.
:param documents: The list of loaded documents.
:return: The created VectorStoreIndex.
"""
return VectorStoreIndex.from_documents(documents)
def query_index(index, query):
"""
Query the VectorStoreIndex using the provided query.
:param index: The VectorStoreIndex to query.
:param query: The query string.
:return: The response from the query engine.
"""
query_engine = index.as_query_engine()
return query_engine.query(query)
def main():
"""
Main function to orchestrate the data loading, indexing, and querying process.
"""
setup_logging()
load_environment_variables()
url = 'https://www.llamaindex.ai/blog/agentic-rag-with-llamaindex-2721b8a49ff6'
documents = load_web_data(url)
index = create_vector_store_index(documents)
query = "Agentic RAG is an example of:"
response = query_index(index, query)
print(response)
if __name__ == "__main__":
main() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.readers.web.SimpleWebPageReader"
] | [((264, 322), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (283, 322), False, 'import logging\n'), ((506, 519), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (517, 519), False, 'from dotenv import load_dotenv\n'), ((1002, 1044), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1033, 1044), False, 'from llama_index.core import VectorStoreIndex\n'), ((358, 398), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (379, 398), False, 'import logging\n'), ((327, 346), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (344, 346), False, 'import logging\n'), ((719, 757), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (738, 757), False, 'from llama_index.readers.web import SimpleWebPageReader\n')] |
import logging
import sys
import pandas as pd
import os
from llmload import LoadLLM
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.evaluation import DatasetGenerator, RelevancyEvaluator
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
LLMPredictor,
Response,
)
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
model_n_ctx = os.environ.get('MODEL_N_CTX')
model_n_batch = os.environ.get('MODEL_N_BATCH')
def generateQuestion():
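    """Generate candidate evaluation questions from the documents in source_directory and print them."""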
reader = SimpleDirectoryReader(source_directory)
documents = reader.load_data()
data_generator = DatasetGenerator.from_documents(documents)
eval_questions = data_generator.generate_questions_from_nodes()
print(eval_questions)
# define jupyter display function
def display_eval_df(query: str, response: Response, eval_result: str) -> None:
eval_df = pd.DataFrame(
{
"Query": query,
"Response": str(response),
"Source": (
response.source_nodes[0].node.get_content()[:1000] + "..."
),
"Evaluation Result": eval_result,
},
index=[0],
)
eval_df = eval_df.style.set_properties(
**{
"inline-size": "600px",
"overflow-wrap": "break-word",
},
subset=["Response", "Source"]
)
print(eval_df)
def generateQuestionEval():
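    """Generate evaluation questions, answer one with a vector index and evaluate the response's relevancy."""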
reader = SimpleDirectoryReader(source_directory)
documents = reader.load_data()
data_generator = DatasetGenerator.from_documents(documents)
eval_questions = data_generator.generate_questions_from_nodes()
print(eval_questions)
service_context_gpt4 = ServiceContext.from_defaults(llm=LoadLLM(n_ctx=model_n_ctx, n_batch=model_n_batch, verbose=False))
evaluator_gpt4 = RelevancyEvaluator(service_context=service_context_gpt4)
vector_index = VectorStoreIndex.from_documents(
documents, service_context=service_context_gpt4
)
query_engine = vector_index.as_query_engine()
response_vector = query_engine.query(eval_questions[1])
eval_result = evaluator_gpt4.evaluate_response(
query=eval_questions[1], response=response_vector
)
display_eval_df(eval_questions[1], response_vector, eval_result)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.evaluation.DatasetGenerator.from_documents",
"llama_index.evaluation.RelevancyEvaluator"
] | [((84, 142), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (103, 142), False, 'import logging\n'), ((437, 491), 'os.environ.get', 'os.environ.get', (['"""SOURCE_DIRECTORY"""', '"""source_documents"""'], {}), "('SOURCE_DIRECTORY', 'source_documents')\n", (451, 491), False, 'import os\n'), ((506, 535), 'os.environ.get', 'os.environ.get', (['"""MODEL_N_CTX"""'], {}), "('MODEL_N_CTX')\n", (520, 535), False, 'import os\n'), ((552, 583), 'os.environ.get', 'os.environ.get', (['"""MODEL_N_BATCH"""'], {}), "('MODEL_N_BATCH')\n", (566, 583), False, 'import os\n'), ((174, 214), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (195, 214), False, 'import logging\n'), ((626, 665), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['source_directory'], {}), '(source_directory)\n', (647, 665), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, LLMPredictor, Response\n'), ((723, 765), 'llama_index.evaluation.DatasetGenerator.from_documents', 'DatasetGenerator.from_documents', (['documents'], {}), '(documents)\n', (754, 765), False, 'from llama_index.evaluation import DatasetGenerator, RelevancyEvaluator\n'), ((1533, 1572), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['source_directory'], {}), '(source_directory)\n', (1554, 1572), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, LLMPredictor, Response\n'), ((1630, 1672), 'llama_index.evaluation.DatasetGenerator.from_documents', 'DatasetGenerator.from_documents', (['documents'], {}), '(documents)\n', (1661, 1672), False, 'from llama_index.evaluation import DatasetGenerator, RelevancyEvaluator\n'), ((1917, 1973), 'llama_index.evaluation.RelevancyEvaluator', 'RelevancyEvaluator', ([], {'service_context': 'service_context_gpt4'}), '(service_context=service_context_gpt4)\n', (1935, 1973), False, 'from llama_index.evaluation import DatasetGenerator, RelevancyEvaluator\n'), ((1994, 2079), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context_gpt4'}), '(documents, service_context=service_context_gpt4\n )\n', (2025, 2079), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, LLMPredictor, Response\n'), ((143, 162), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (160, 162), False, 'import logging\n'), ((1830, 1894), 'llmload.LoadLLM', 'LoadLLM', ([], {'n_ctx': 'model_n_ctx', 'n_batch': 'model_n_batch', 'verbose': '(False)'}), '(n_ctx=model_n_ctx, n_batch=model_n_batch, verbose=False)\n', (1837, 1894), False, 'from llmload import LoadLLM\n')] |
# %% [markdown]
# # Llama-Index Quickstart
#
# In this quickstart you will create a simple Llama Index App and learn how to log it and get feedback on an LLM response.
#
# For evaluation, we will leverage the "hallucination triad" of groundedness, context relevance and answer relevance.
#
# [](https://colab.research.google.com/github/truera/trulens/blob/main/trulens_eval/examples/quickstart/llama_index_quickstart.ipynb)
# %%
# %pip install -qU "trulens_eval>=0.19.2" "llama_index>0.9.17" "html2text>=2020.1.16" qdrant_client python-dotenv ipywidgets streamlit_jupyter "litellm>=1.15.1" google-cloud-aiplatform
import os
from trulens_eval import Feedback, TruLlama
from trulens_eval.feedback import Groundedness
from trulens_eval import LiteLLM
import numpy as np
from trulens_eval import Tru
from google.cloud import aiplatform
from llama_index.readers.web import SimpleWebPageReader
from llama_index import VectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings import GeminiEmbedding
from llama_index.llms import Gemini
from llama_index.vector_stores import QdrantVectorStore
import qdrant_client
from llama_index import StorageContext
GOOGLE_API_KEY = os.environ["GEMINI_API_KEY"]
# This is used by LiteLLM for Vertex AI models, including Gemini.
# The LiteLLM wrapper for Gemini is used by the TruLens evaluation provider.
aiplatform.init(project="fovi-site", location="us-west1")
tru = Tru(database_redact_keys=True)
# ### Create Simple LLM Application
#
# This example uses LlamaIndex; here it is wired to Gemini rather than the default OpenAI LLM.
__documents = SimpleWebPageReader(html_to_text=True).load_data(
["http://paulgraham.com/worked.html"]
)
# from llama_index.vector_stores import ChromaVectorStore
# import chromadb
# # initialize client, setting path to save data
# db = chromadb.PersistentClient(path="./chroma_db")
# # create collection
# chroma_collection = db.get_or_create_collection("quickstart")
# # assign chroma as the vector_store to the context
# vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# Create a local Qdrant vector store
__client = qdrant_client.QdrantClient(path="qdrant_gemini_3")
__vector_store = QdrantVectorStore(client=__client, collection_name="collection")
# Use Gemini as the embedding model
__embed_model = GeminiEmbedding(
model_name="models/embedding-001", api_key=GOOGLE_API_KEY
)
__service_context = ServiceContext.from_defaults(
llm=Gemini(api_key=GOOGLE_API_KEY), embed_model=__embed_model
)
__storage_context = StorageContext.from_defaults(vector_store=__vector_store)
__index = VectorStoreIndex.from_documents(
__documents,
service_context=__service_context,
storage_context=__storage_context,
show_progress=True,
)
def load_llamaindex_app():
return __index.as_query_engine()
query_engine = load_llamaindex_app()
# response = query_engine.query("What does the author say about their education?")
# print(response)
# response = query_engine.query("Where did the author go to school?")
# print(response)
# response = query_engine.query("Who was the author's Harvard PhD advisor?")
# print(response)
# response = query_engine.query("who was Tom Cheatham to the author?")
# print(response)
# response = query_engine.query("who is Tom? why is he in this story?")
# print(response)
# response = query_engine.query("what is this story about? what are the most important things the author want the reader to learn?")
# print(response)
# ## Initialize Feedback Function(s)
# import litellm
# litellm.set_verbose=True
# Initialize provider class
gemini_provider = LiteLLM(model_engine="gemini-pro")
grounded = Groundedness(groundedness_provider=gemini_provider)
# Define a groundedness feedback function
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons)
.on(TruLlama.select_source_nodes().node.text.collect())
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
# Question/answer relevance between overall question and answer.
f_qa_relevance = Feedback(gemini_provider.relevance).on_input_output()
# Question/statement relevance between question and each context chunk.
f_qs_relevance = (
Feedback(gemini_provider.qs_relevance)
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
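# Editor's note (sketch): together, these three feedbacks implement the
# "hallucination triad" described above:
#   - f_groundedness checks the answer against the retrieved source-node text,
#   - f_qa_relevance scores the answer against the original question,
#   - f_qs_relevance scores each retrieved chunk against the question and
#     averages the per-chunk scores with np.mean.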
# ## Instrument app for logging with TruLens
tru_query_engine_recorder = TruLlama(
query_engine,
tru=tru,
app_id="PaulGraham",
initial_app_loader=load_llamaindex_app,
feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance],
)
# # or as context manager
# with tru_query_engine_recorder as recording:
# response = query_engine.query("Why did the author drop AI?")
# print(response)
# ## Explore in a Dashboard
tru.run_dashboard() # open a local streamlit app to explore
# tru.run_dashboard_in_jupyter() # open a streamlit app in the notebook
# tru.stop_dashboard(force=True) # stop if needed
# Alternatively, you can run `trulens-eval` from a command line in the same folder to start the dashboard.
# Note: Feedback functions evaluated in the deferred manner can be seen in the "Progress" page of the TruLens dashboard.
# ## Or view results directly in your notebook
# tru.get_records_and_feedback(app_ids=[])[0] # pass an empty list of app_ids to get all
# def load_llamaindex_app():
# # from llama_index import VectorStoreIndex
# index = VectorStoreIndex.from_documents(documents)
# query_engine = index.as_query_engine()
# return query_engine
# app2 = load_llamaindex_app()
# # tru_app2 = tru.Llama(
# # Can't specify which Tru instance to use with tru.Llama.
# tru_app2 = TruLlama(
# app2,
# tru=tru,
# app_id="llamaindex_appZZ",
# initial_app_loader=load_llamaindex_app,
# feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance]
# )
# tru.add_app(tru_app2)
# from trulens_eval.appui import AppUI
# aui = AppUI(
# app=tru_app2,
# app_selectors=[
# ],
# record_selectors=[
# "app.retriever.retrieve[0].rets[:].score",
# "app.retriever.retrieve[0].rets[:].node.text",
# ]
# )
# aui.widget
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.vector_stores.QdrantVectorStore",
"llama_index.llms.Gemini",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.web.SimpleWebPageReader",
"llama_index.embeddings.GeminiEmbedding"
] | [((1436, 1493), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '"""fovi-site"""', 'location': '"""us-west1"""'}), "(project='fovi-site', location='us-west1')\n", (1451, 1493), False, 'from google.cloud import aiplatform\n'), ((1501, 1531), 'trulens_eval.Tru', 'Tru', ([], {'database_redact_keys': '(True)'}), '(database_redact_keys=True)\n', (1504, 1531), False, 'from trulens_eval import Tru\n'), ((2190, 2240), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""qdrant_gemini_3"""'}), "(path='qdrant_gemini_3')\n", (2216, 2240), False, 'import qdrant_client\n'), ((2259, 2323), 'llama_index.vector_stores.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': '__client', 'collection_name': '"""collection"""'}), "(client=__client, collection_name='collection')\n", (2276, 2323), False, 'from llama_index.vector_stores import QdrantVectorStore\n'), ((2379, 2453), 'llama_index.embeddings.GeminiEmbedding', 'GeminiEmbedding', ([], {'model_name': '"""models/embedding-001"""', 'api_key': 'GOOGLE_API_KEY'}), "(model_name='models/embedding-001', api_key=GOOGLE_API_KEY)\n", (2394, 2453), False, 'from llama_index.embeddings import GeminiEmbedding\n'), ((2598, 2655), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': '__vector_store'}), '(vector_store=__vector_store)\n', (2626, 2655), False, 'from llama_index import StorageContext\n'), ((2667, 2806), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['__documents'], {'service_context': '__service_context', 'storage_context': '__storage_context', 'show_progress': '(True)'}), '(__documents, service_context=\n __service_context, storage_context=__storage_context, show_progress=True)\n', (2698, 2806), False, 'from llama_index import VectorStoreIndex, StorageContext, ServiceContext\n'), ((3677, 3711), 'trulens_eval.LiteLLM', 'LiteLLM', ([], {'model_engine': '"""gemini-pro"""'}), "(model_engine='gemini-pro')\n", (3684, 3711), False, 'from trulens_eval import LiteLLM\n'), ((3724, 3775), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'gemini_provider'}), '(groundedness_provider=gemini_provider)\n', (3736, 3775), False, 'from trulens_eval.feedback import Groundedness\n'), ((4473, 4634), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'tru': 'tru', 'app_id': '"""PaulGraham"""', 'initial_app_loader': 'load_llamaindex_app', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(query_engine, tru=tru, app_id='PaulGraham', initial_app_loader=\n load_llamaindex_app, feedbacks=[f_groundedness, f_qa_relevance,\n f_qs_relevance])\n", (4481, 4634), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1654, 1692), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (1673, 1692), False, 'from llama_index.readers.web import SimpleWebPageReader\n'), ((2518, 2548), 'llama_index.llms.Gemini', 'Gemini', ([], {'api_key': 'GOOGLE_API_KEY'}), '(api_key=GOOGLE_API_KEY)\n', (2524, 2548), False, 'from llama_index.llms import Gemini\n'), ((4117, 4152), 'trulens_eval.Feedback', 'Feedback', (['gemini_provider.relevance'], {}), '(gemini_provider.relevance)\n', (4125, 4152), False, 'from trulens_eval import Feedback, TruLlama\n'), ((4330, 4360), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (4358, 4360), False, 'from trulens_eval import Feedback, TruLlama\n'), ((3842, 3898), 
'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {}), '(grounded.groundedness_measure_with_cot_reasons)\n', (3850, 3898), False, 'from trulens_eval import Feedback, TruLlama\n'), ((4267, 4305), 'trulens_eval.Feedback', 'Feedback', (['gemini_provider.qs_relevance'], {}), '(gemini_provider.qs_relevance)\n', (4275, 4305), False, 'from trulens_eval import Feedback, TruLlama\n'), ((3907, 3937), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (3935, 3937), False, 'from trulens_eval import Feedback, TruLlama\n')] |
import os
import shutil
import tarfile
import tempfile
import time
from pathlib import Path
import arxiv
import openai
import pandas as pd
import pdfplumber
import streamlit as st
from llama_index import (KeywordTableIndex, KnowledgeGraphIndex,
ServiceContext, SimpleDirectoryReader, SummaryIndex,
TreeIndex, VectorStoreIndex, download_loader,
set_global_service_context)
from llama_index.llms import OpenAI, Xinference
from llama_index.schema import Document
from PIL import Image
from st_files_connection import FilesConnection
from xinference.client import RESTfulClient
from pdfextract.export_annotation import export_annotation
from pdfextract.pdf_extract import pdf_extract
from texannotate.annotate_file import annotate_file
from texannotate.color_annotation import ColorAnnotation
from texcompile.client import compile_pdf_return_bytes
from utils.utils import (find_latex_file, postprocess_latex, preprocess_latex,
tup2str)
st.set_page_config(page_title='Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow.', layout="wide")
texcompile_host = st.secrets.texcompile_host
texcompile_port = st.secrets.texcompile_port
def main():
"""
The main function for the Streamlit app.
:return: None.
"""
st.title("Chat with arXiv paper, without PDF noise")
st.sidebar.markdown('# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)')
st.sidebar.markdown("""<small>It's always good practice to verify that a website is safe before giving it your API key.
This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>""", unsafe_allow_html=True)
col1, col2 = st.columns([1, 0.8], gap='medium')
with col2:
with st.form("my_form"):
api_key = st.text_input("Enter OpenAI API key here.", type='password')
arxiv_id = st.text_input("Please enter a arXiv paper id:", value='1601.00978')
submitted = st.form_submit_button("Submit and process arXiv paper (click once and wait)")
if submitted:
process_submit_button(col1, col2, arxiv_id, api_key)
index = load_data()
st.session_state["index"] = index
if 'index' in st.session_state:
if "imgs" in st.session_state.keys():
with col1.container():
for img in st.session_state["imgs"]:
st.image(img)
chat_engine = st.session_state["index"].as_chat_engine(chat_mode="condense_question", verbose=True)
if "messages" not in st.session_state.keys(): # Initialize the chat message history
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about the paper!"}
]
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
def process_submit_button(col1, col2, arxiv_id, api_key):
with col2:
with st.spinner("Downloading LaTeX code..."):
filename = validate_input(arxiv_id)
if not filename:
st.error("id not found on arXiv, or the paper doesn't contain LaTeX code.")
return
with st.spinner("Annotating LaTeX code... please wait..."):
df_toc, df_data = extract_file(filename, col1)
df_data.to_csv('data.csv', sep='\t')
with st.spinner("Loading llm..."):
if api_key == '':
st.error('Please set your OpenAI key.')
if api_key == 'local':
set_local_llm()
else:
openai.api_key = api_key
set_openai_llm()
        st.info("Now you have a cleaned PDF. Only the colored parts were penned by the paper's author, and the extracted text is re-sorted in reading order.", icon="📃")
@st.cache_resource(show_spinner=True)
def load_data():
df_data = pd.read_csv('data.csv', sep='\t')
text = ''
section_id = 0
df_data.index.name='myindex'
for i, row in df_data[df_data['reading_order']!=-1].sort_values(by=['reading_order', 'myindex']).iterrows():
if row['section_id'] > section_id:
text += '\n'
section_id = row['section_id']
if row['label'] != 'Figure':
text += row['token'] + ' '
sections = text.split('\n')
docs = [Document(text=section) for section in sections]
with st.spinner(text="Loading and indexing the paper - hang tight! This should take 1-2 minutes."):
index = VectorStoreIndex.from_documents(docs)
return index
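# Editor's note (sketch): `data.csv` is written by process_submit_button() above.
# load_data() replays the annotated tokens in reading order, skips Figure tokens,
# concatenates each section into its own Document, and indexes those documents.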
def validate_input(arxiv_id):
try:
paper = next(arxiv.Search(id_list=[arxiv_id]).results())
filename = paper.download_source()
return filename
except:
return False
def extract_file(filename, col1):
with col1:
placeholder = st.empty()
st.session_state['imgs'] = []
try:
Path("tmp").mkdir(parents=True, exist_ok=True)
td = 'tmp'
#print('temp dir', td)
with tarfile.open(filename ,'r:gz') as tar:
tar.extractall(td)
preprocess_latex(td)
basename, pdf_bytes = compile_pdf_return_bytes(
sources_dir=td,
host=texcompile_host,
port=texcompile_port
            ) # first, compile the unmodified LaTeX
with placeholder.container():
for page in pdfplumber.open(pdf_bytes).pages:
image = page.to_image(resolution=300).original
st.image(image)
shapes, tokens = pdf_extract(pdf_bytes)
## get colors
color_dict = ColorAnnotation()
for rect in shapes:
color_dict.add_existing_color(tup2str(rect['stroking_color']))
for token in tokens:
color_dict.add_existing_color(token['color'])
shutil.rmtree(td)
Path("tmp").mkdir(parents=True, exist_ok=True)
with tarfile.open(filename ,'r:gz') as tar:
tar.extractall(td)
tex_file = Path(find_latex_file(Path(basename).stem, basepath=td)).name
annotate_file(tex_file, color_dict, latex_context=None, basepath=td)
postprocess_latex(str(Path(find_latex_file(Path(basename).stem, basepath=td))))
basename, pdf_bytes_mod = compile_pdf_return_bytes(
sources_dir=td,
host=texcompile_host,
port=texcompile_port
) # compile the modified latex
placeholder.empty()
with placeholder.container():
for page in pdfplumber.open(pdf_bytes_mod).pages:
image = page.to_image(resolution=300).original
st.image(image)
shapes, tokens = pdf_extract(pdf_bytes_mod)
df_toc, df_data = export_annotation(shapes, tokens, color_dict)
shutil.rmtree(td)
colors = {
"Abstract":(255, 182, 193), "Author":(0, 0, 139), "Caption":(57, 230, 10),
"Equation":(255, 0, 0),"Figure":(230, 51, 249),"Footer":(255, 255, 255),
"List":(46, 33, 109),"Paragraph":(181, 196, 220),"Reference":(81, 142, 32),
"Section":(24, 14, 248),"Table":(129, 252, 254),"Title":(97, 189, 251)
}
imgs = []
placeholder.empty()
with placeholder.container():
for i, page in enumerate(pdfplumber.open(pdf_bytes).pages):
image = page.to_image(resolution=300)
for _, rect in df_data.iterrows():
if rect['page'] == i+1:
color = colors.get(rect['label'], (0,0,0))
image.draw_rect((rect['x0'], rect['y0'], rect['x1'], rect['y1']), fill=(color[0],color[1],color[2],70), stroke=color, stroke_width=1)
imgs.append(image.annotated)
st.image(image.annotated)
st.session_state['imgs'] = imgs
return df_toc, df_data
except Exception as e:
raise e
#st.error("LaTeX code parsing error, please follow LaTeX Rainbow's example to add new parsing rules.")
return None, None
def set_local_llm():
port = 9997 # replace with your endpoint port number
client = RESTfulClient(f"http://localhost:{port}")
# Download and Launch a model, this may take a while the first time
model_uid = client.launch_model(
model_name="llama-2-chat",
model_size_in_billions=7,
model_format="pytorch",
quantization="none",
)
# Initiate Xinference object to use the LLM
llm = Xinference(
endpoint=f"http://localhost:{port}",
model_uid=model_uid,
temperature=0.5,
max_tokens=512,
)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model="local:BAAI/bge-small-en"
)
set_global_service_context(service_context)
def set_openai_llm():
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the paper and your job is to answer technical questions. Keep your answers precise and based on facts – do not hallucinate features."))
set_global_service_context(service_context)
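# Editor's sketch (file name is hypothetical): launch the app with
#   streamlit run app.py
# after setting texcompile_host / texcompile_port in .streamlit/secrets.toml;
# the OpenAI API key (or the value 'local' for Xinference) is entered in the form at runtime.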
if __name__ == '__main__':
main() | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.llms.Xinference",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context",
"llama_index.schema.Document"
] | [((1035, 1158), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow."""', 'layout': '"""wide"""'}), "(page_title=\n 'Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow.',\n layout='wide')\n", (1053, 1158), True, 'import streamlit as st\n'), ((4713, 4749), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(True)'}), '(show_spinner=True)\n', (4730, 4749), True, 'import streamlit as st\n'), ((1338, 1390), 'streamlit.title', 'st.title', (['"""Chat with arXiv paper, without PDF noise"""'], {}), "('Chat with arXiv paper, without PDF noise')\n", (1346, 1390), True, 'import streamlit as st\n'), ((1395, 1502), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)"""'], {}), "(\n '# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)'\n )\n", (1414, 1502), True, 'import streamlit as st\n'), ((1497, 1783), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""<small>It\'s always good practice to verify that a website is safe before giving it your API key. \n This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>"""'], {'unsafe_allow_html': '(True)'}), '(\n """<small>It\'s always good practice to verify that a website is safe before giving it your API key. \n This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>"""\n , unsafe_allow_html=True)\n', (1516, 1783), True, 'import streamlit as st\n'), ((1791, 1825), 'streamlit.columns', 'st.columns', (['[1, 0.8]'], {'gap': '"""medium"""'}), "([1, 0.8], gap='medium')\n", (1801, 1825), True, 'import streamlit as st\n'), ((4781, 4814), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {'sep': '"""\t"""'}), "('data.csv', sep='\\t')\n", (4792, 4814), True, 'import pandas as pd\n'), ((9288, 9329), 'xinference.client.RESTfulClient', 'RESTfulClient', (['f"""http://localhost:{port}"""'], {}), "(f'http://localhost:{port}')\n", (9301, 9329), False, 'from xinference.client import RESTfulClient\n'), ((9635, 9740), 'llama_index.llms.Xinference', 'Xinference', ([], {'endpoint': 'f"""http://localhost:{port}"""', 'model_uid': 'model_uid', 'temperature': '(0.5)', 'max_tokens': '(512)'}), "(endpoint=f'http://localhost:{port}', model_uid=model_uid,\n temperature=0.5, max_tokens=512)\n", (9645, 9740), False, 'from llama_index.llms import OpenAI, Xinference\n'), ((9798, 9874), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en"""'}), "(llm=llm, embed_model='local:BAAI/bge-small-en')\n", (9826, 9874), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((9893, 9936), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (9919, 9936), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((10239, 10282), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (10265, 10282), False, 'from llama_index import KeywordTableIndex, 
KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((4566, 4719), 'streamlit.info', 'st.info', (['"""Now you get a cleaned PDF. Only colored part are penned by paper author. Extracted text are resorted by the reading order."""'], {'icon': '"""📃"""'}), "(\n 'Now you get a cleaned PDF. Only colored part are penned by paper author. Extracted text are resorted by the reading order.'\n , icon='📃')\n", (4573, 4719), True, 'import streamlit as st\n'), ((5226, 5248), 'llama_index.schema.Document', 'Document', ([], {'text': 'section'}), '(text=section)\n', (5234, 5248), False, 'from llama_index.schema import Document\n'), ((5283, 5386), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the paper - hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the paper - hang tight! This should take 1-2 minutes.'\n )\n", (5293, 5386), True, 'import streamlit as st\n'), ((5394, 5431), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (5425, 5431), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((5739, 5749), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5747, 5749), True, 'import streamlit as st\n'), ((1854, 1872), 'streamlit.form', 'st.form', (['"""my_form"""'], {}), "('my_form')\n", (1861, 1872), True, 'import streamlit as st\n'), ((1896, 1956), 'streamlit.text_input', 'st.text_input', (['"""Enter OpenAI API key here."""'], {'type': '"""password"""'}), "('Enter OpenAI API key here.', type='password')\n", (1909, 1956), True, 'import streamlit as st\n'), ((1980, 2047), 'streamlit.text_input', 'st.text_input', (['"""Please enter a arXiv paper id:"""'], {'value': '"""1601.00978"""'}), "('Please enter a arXiv paper id:', value='1601.00978')\n", (1993, 2047), True, 'import streamlit as st\n'), ((2072, 2149), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit and process arXiv paper (click once and wait)"""'], {}), "('Submit and process arXiv paper (click once and wait)')\n", (2093, 2149), True, 'import streamlit as st\n'), ((2388, 2411), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2409, 2411), True, 'import streamlit as st\n'), ((2674, 2697), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2695, 2697), True, 'import streamlit as st\n'), ((2902, 2932), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2915, 2932), True, 'import streamlit as st\n'), ((2995, 3064), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3027, 3064), True, 'import streamlit as st\n'), ((3858, 3897), 'streamlit.spinner', 'st.spinner', (['"""Downloading LaTeX code..."""'], {}), "('Downloading LaTeX code...')\n", (3868, 3897), True, 'import streamlit as st\n'), ((4105, 4158), 'streamlit.spinner', 'st.spinner', (['"""Annotating LaTeX code... please wait..."""'], {}), "('Annotating LaTeX code... 
please wait...')\n", (4115, 4158), True, 'import streamlit as st\n'), ((4282, 4310), 'streamlit.spinner', 'st.spinner', (['"""Loading llm..."""'], {}), "('Loading llm...')\n", (4292, 4310), True, 'import streamlit as st\n'), ((6081, 6170), 'texcompile.client.compile_pdf_return_bytes', 'compile_pdf_return_bytes', ([], {'sources_dir': 'td', 'host': 'texcompile_host', 'port': 'texcompile_port'}), '(sources_dir=td, host=texcompile_host, port=\n texcompile_port)\n', (6105, 6170), False, 'from texcompile.client import compile_pdf_return_bytes\n'), ((6505, 6527), 'pdfextract.pdf_extract.pdf_extract', 'pdf_extract', (['pdf_bytes'], {}), '(pdf_bytes)\n', (6516, 6527), False, 'from pdfextract.pdf_extract import pdf_extract\n'), ((6579, 6596), 'texannotate.color_annotation.ColorAnnotation', 'ColorAnnotation', ([], {}), '()\n', (6594, 6596), False, 'from texannotate.color_annotation import ColorAnnotation\n'), ((6815, 6832), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (6828, 6832), False, 'import shutil\n'), ((7080, 7148), 'texannotate.annotate_file.annotate_file', 'annotate_file', (['tex_file', 'color_dict'], {'latex_context': 'None', 'basepath': 'td'}), '(tex_file, color_dict, latex_context=None, basepath=td)\n', (7093, 7148), False, 'from texannotate.annotate_file import annotate_file\n'), ((7279, 7368), 'texcompile.client.compile_pdf_return_bytes', 'compile_pdf_return_bytes', ([], {'sources_dir': 'td', 'host': 'texcompile_host', 'port': 'texcompile_port'}), '(sources_dir=td, host=texcompile_host, port=\n texcompile_port)\n', (7303, 7368), False, 'from texcompile.client import compile_pdf_return_bytes\n'), ((7728, 7754), 'pdfextract.pdf_extract.pdf_extract', 'pdf_extract', (['pdf_bytes_mod'], {}), '(pdf_bytes_mod)\n', (7739, 7754), False, 'from pdfextract.pdf_extract import pdf_extract\n'), ((7785, 7830), 'pdfextract.export_annotation.export_annotation', 'export_annotation', (['shapes', 'tokens', 'color_dict'], {}), '(shapes, tokens, color_dict)\n', (7802, 7830), False, 'from pdfextract.export_annotation import export_annotation\n'), ((7843, 7860), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (7856, 7860), False, 'import shutil\n'), ((10016, 10243), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)', 'system_prompt': '"""You are an expert on the paper and your job is to answer technical questions. Keep your answers precise and based on facts – do not hallucinate features."""'}), "(model='gpt-3.5-turbo', temperature=0.5, system_prompt=\n 'You are an expert on the paper and your job is to answer technical questions. 
Keep your answers precise and based on facts – do not hallucinate features.'\n )\n", (10022, 10243), False, 'from llama_index.llms import OpenAI, Xinference\n'), ((3175, 3207), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (3190, 3207), True, 'import streamlit as st\n'), ((3225, 3253), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (3233, 3253), True, 'import streamlit as st\n'), ((3410, 3438), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3425, 3438), True, 'import streamlit as st\n'), ((3992, 4067), 'streamlit.error', 'st.error', (['"""id not found on arXiv, or the paper doesn\'t contain LaTeX code."""'], {}), '("id not found on arXiv, or the paper doesn\'t contain LaTeX code.")\n', (4000, 4067), True, 'import streamlit as st\n'), ((4358, 4397), 'streamlit.error', 'st.error', (['"""Please set your OpenAI key."""'], {}), "('Please set your OpenAI key.')\n", (4366, 4397), True, 'import streamlit as st\n'), ((5935, 5965), 'tarfile.open', 'tarfile.open', (['filename', '"""r:gz"""'], {}), "(filename, 'r:gz')\n", (5947, 5965), False, 'import tarfile\n'), ((6025, 6045), 'utils.utils.preprocess_latex', 'preprocess_latex', (['td'], {}), '(td)\n', (6041, 6045), False, 'from utils.utils import find_latex_file, postprocess_latex, preprocess_latex, tup2str\n'), ((6910, 6940), 'tarfile.open', 'tarfile.open', (['filename', '"""r:gz"""'], {}), "(filename, 'r:gz')\n", (6922, 6940), False, 'import tarfile\n'), ((2521, 2534), 'streamlit.image', 'st.image', (['img'], {}), '(img)\n', (2529, 2534), True, 'import streamlit as st\n'), ((3461, 3486), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3471, 3486), True, 'import streamlit as st\n'), ((3564, 3591), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (3572, 3591), True, 'import streamlit as st\n'), ((3694, 3735), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (3726, 3735), True, 'import streamlit as st\n'), ((5519, 5551), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[arxiv_id]'}), '(id_list=[arxiv_id])\n', (5531, 5551), False, 'import arxiv\n'), ((5813, 5824), 'pathlib.Path', 'Path', (['"""tmp"""'], {}), "('tmp')\n", (5817, 5824), False, 'from pathlib import Path\n'), ((6338, 6364), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes'], {}), '(pdf_bytes)\n', (6353, 6364), False, 'import pdfplumber\n'), ((6459, 6474), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (6467, 6474), True, 'import streamlit as st\n'), ((6675, 6706), 'utils.utils.tup2str', 'tup2str', (["rect['stroking_color']"], {}), "(rect['stroking_color'])\n", (6682, 6706), False, 'from utils.utils import find_latex_file, postprocess_latex, preprocess_latex, tup2str\n'), ((6845, 6856), 'pathlib.Path', 'Path', (['"""tmp"""'], {}), "('tmp')\n", (6849, 6856), False, 'from pathlib import Path\n'), ((7558, 7588), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes_mod'], {}), '(pdf_bytes_mod)\n', (7573, 7588), False, 'import pdfplumber\n'), ((7683, 7698), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (7691, 7698), True, 'import streamlit as st\n'), ((8893, 8918), 'streamlit.image', 'st.image', (['image.annotated'], {}), '(image.annotated)\n', (8901, 8918), True, 'import streamlit as st\n'), ((8395, 8421), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes'], {}), '(pdf_bytes)\n', (8410, 8421), 
False, 'import pdfplumber\n'), ((7028, 7042), 'pathlib.Path', 'Path', (['basename'], {}), '(basename)\n', (7032, 7042), False, 'from pathlib import Path\n'), ((7204, 7218), 'pathlib.Path', 'Path', (['basename'], {}), '(basename)\n', (7208, 7218), False, 'from pathlib import Path\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler
from llama_index.legacy.callbacks.arize_phoenix_callback import (
arize_phoenix_callback_handler,
)
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.legacy.callbacks.open_inference_callback import (
OpenInferenceCallbackHandler,
)
from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.legacy
llama_index.legacy.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
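# Editor's sketch: a typical call, using the parameter-free "simple" handler:
#   set_global_handler("simple")
# which stores the created SimpleLLMHandler in llama_index.legacy.global_handler.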
| [
"llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler"
] | [((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1360, 1375), False, 'from llama_index.legacy.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1433, 1478), 'llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.legacy.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1532, 1573), 'llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1558, 1573), False, 'from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1629, 1662), 'llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1647, 1662), False, 'from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1715, 1755), 'llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1740, 1755), False, 'from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1806, 1837), 'llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1822, 1837), False, 'from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1889, 1928), 'llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1913, 1928), False, 'from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Coroutine
from langchain.llms.base import BaseLLM
from nemoguardrails import LLMRails, RailsConfig
COLANG_CONFIG = """
define user express greeting
"hi"
define user express ill intent
"I hate you"
"I want to destroy the world"
define bot express cannot respond
"I'm sorry I cannot help you with that."
define user express question
"What is the current unemployment rate?"
# Basic guardrail example
define flow
user express ill intent
bot express cannot respond
# Question answering flow
define flow
user ...
$answer = execute llama_index_query(query=$last_user_message)
bot $answer
"""
YAML_CONFIG = """
models:
- type: main
engine: openai
model: gpt-3.5-turbo-instruct
"""
def demo():
try:
import llama_index
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.response.schema import StreamingResponse
except ImportError:
raise ImportError(
"Could not import llama_index, please install it with "
"`pip install llama_index`."
)
config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG)
app = LLMRails(config)
def _get_llama_index_query_engine(llm: BaseLLM):
docs = llama_index.SimpleDirectoryReader(
input_files=["../examples/bots/abc/kb/employee-handbook.md"]
).load_data()
llm_predictor = llama_index.LLMPredictor(llm=llm)
index = llama_index.GPTVectorStoreIndex.from_documents(
docs, llm_predictor=llm_predictor
)
default_query_engine = index.as_query_engine()
return default_query_engine
def _get_callable_query_engine(
query_engine: BaseQueryEngine,
) -> Callable[[str], Coroutine[Any, Any, str]]:
async def get_query_response(query: str) -> str:
response = query_engine.query(query)
if isinstance(response, StreamingResponse):
typed_response = response.get_response()
else:
typed_response = response
response_str = typed_response.response
if response_str is None:
return ""
return response_str
return get_query_response
query_engine = _get_llama_index_query_engine(app.llm)
app.register_action(
_get_callable_query_engine(query_engine), name="llama_index_query"
)
history = [{"role": "user", "content": "How many vacation days do I get?"}]
result = app.generate(messages=history)
print(result)
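    # Editor's sketch: the "ill intent" rail in COLANG_CONFIG above short-circuits
    # retrieval, so e.g.
    #   app.generate(messages=[{"role": "user", "content": "I want to destroy the world"}])
    # should return the canned refusal "I'm sorry I cannot help you with that."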
if __name__ == "__main__":
demo()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.LLMPredictor"
] | [((1797, 1849), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1821, 1849), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1860, 1876), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n', (1868, 1876), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((2100, 2133), 'llama_index.LLMPredictor', 'llama_index.LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2124, 2133), False, 'import llama_index\n'), ((2150, 2236), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['docs'], {'llm_predictor': 'llm_predictor'}), '(docs, llm_predictor=\n llm_predictor)\n', (2196, 2236), False, 'import llama_index\n'), ((1946, 2046), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', ([], {'input_files': "['../examples/bots/abc/kb/employee-handbook.md']"}), "(input_files=[\n '../examples/bots/abc/kb/employee-handbook.md'])\n", (1979, 2046), False, 'import llama_index\n')] |
import sys
from langchain import OpenAI
from pathlib import Path
import llama_index as li
#from llamahub.connectors import TextFileConnector
from llama_index import SimpleDirectoryReader,GPTListIndex,LLMPredictor
file_name = sys.argv[1]
llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo")) #temperature=0,
docs = SimpleDirectoryReader('.', [file_name]).load_data()
index = GPTListIndex(docs)
ex = """Today we finish off our study of collaborative filtering by looking closely at embeddings—a critical building block of many deep learning algorithms. Then we’ll dive into convolutional neural networks (CNNs) and see how they really work. We’ve used plenty of CNNs through this course, but we haven’t peeked inside them to see what’s really going on in there. As well as learning about their most fundamental building block, the convolution, we’ll also look at pooling, dropout, and more."""
q = f"""Here's an example of a lesson summary from a previous fast.ai lesson: "{ex}" Write a four paragraph summary of the fast.ai lesson contained in the following transcript, using a similar informal writing style to the above summary from the previous lesson."""
summary = index.query(q, response_mode="tree_summarize", llm_predictor=llm_predictor)
Path(f'{Path(file_name).stem}-summ.txt').write_text(str(summary))
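# Editor's sketch (script name is hypothetical): with OPENAI_API_KEY set, running
#   python summarize_lesson.py lesson8-transcript.txt
# writes the generated summary to lesson8-transcript-summ.txt in the working directory.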
| [
"llama_index.GPTListIndex",
"llama_index.SimpleDirectoryReader"
] | [((391, 409), 'llama_index.GPTListIndex', 'GPTListIndex', (['docs'], {}), '(docs)\n', (403, 409), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((271, 305), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (277, 305), False, 'from langchain import OpenAI\n'), ((331, 370), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""."""', '[file_name]'], {}), "('.', [file_name])\n", (352, 370), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((1270, 1285), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1274, 1285), False, 'from pathlib import Path\n')] |
# Copyright © 2024 Pathway
"""
Pathway vector search server and client.
The server reads source documents and build a vector index over them, then starts serving
HTTP requests.
The client queries the server and returns matching documents.
"""
import asyncio
import functools
import json
import logging
import threading
from collections.abc import Callable, Coroutine
from typing import TYPE_CHECKING
import jmespath
import numpy as np
import requests
import pathway as pw
import pathway.xpacks.llm.parsers
import pathway.xpacks.llm.splitters
from pathway.stdlib.ml import index
from pathway.stdlib.ml.classifiers import _knn_lsh
if TYPE_CHECKING:
import langchain_core.documents
import langchain_core.embeddings
import llama_index.core.schema
def _unwrap_udf(func):
if isinstance(func, pw.UDF):
return func.__wrapped__
return func
# https://stackoverflow.com/a/75094151
class _RunThread(threading.Thread):
def __init__(self, coroutine):
self.coroutine = coroutine
self.result = None
super().__init__()
def run(self):
self.result = asyncio.run(self.coroutine)
def _run_async(coroutine):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
thread = _RunThread(coroutine)
thread.start()
thread.join()
return thread.result
else:
return asyncio.run(coroutine)
def _coerce_sync(func: Callable) -> Callable:
if asyncio.iscoroutinefunction(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return _run_async(func(*args, **kwargs))
return wrapper
else:
return func
class VectorStoreServer:
"""
Builds a document indexing pipeline and starts an HTTP REST server for nearest neighbors queries.
Args:
- docs: pathway tables typically coming out of connectors which contain source documents.
- embedder: callable that embeds a single document
- parser: callable that parses file contents into a list of documents
- splitter: callable that splits long documents
"""
def __init__(
self,
*docs: pw.Table,
embedder: Callable[[str], list[float] | Coroutine],
parser: Callable[[bytes], list[tuple[str, dict]]] | None = None,
splitter: Callable[[str], list[tuple[str, dict]]] | None = None,
doc_post_processors: (
list[Callable[[str, dict], tuple[str, dict]]] | None
) = None,
index_params: dict | None = None,
):
self.docs = docs
self.parser: Callable[[bytes], list[tuple[str, dict]]] = _unwrap_udf(
parser if parser is not None else pathway.xpacks.llm.parsers.ParseUtf8()
)
self.doc_post_processors = []
if doc_post_processors:
self.doc_post_processors = [
_unwrap_udf(processor)
for processor in doc_post_processors
if processor is not None
]
self.splitter = _unwrap_udf(
splitter
if splitter is not None
else pathway.xpacks.llm.splitters.null_splitter
)
self.embedder = _unwrap_udf(embedder)
# detect the dimensionality of the embeddings
self.embedding_dimension = len(_coerce_sync(self.embedder)("."))
logging.debug("Embedder has dimension %s", self.embedding_dimension)
DEFAULT_INDEX_PARAMS = dict(distance_type="cosine")
if index_params is not None:
DEFAULT_INDEX_PARAMS.update(index_params)
self.index_params = DEFAULT_INDEX_PARAMS
self._graph = self._build_graph()
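    # Editor's sketch (not part of the original module): minimal wiring with a toy,
    # deterministic embedder standing in for a real embedding model.
    #   docs = pw.io.fs.read("./sample_docs", format="binary",
    #                        mode="static", with_metadata=True)
    #   def toy_embedder(text: str) -> list[float]:
    #       return [float(len(text) % 7)] * 16
    #   server = VectorStoreServer(docs, embedder=toy_embedder)
    #   server.run_server(host="127.0.0.1", port=8765, threaded=True)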
@classmethod
def from_langchain_components(
cls,
*docs,
embedder: "langchain_core.embeddings.Embeddings",
parser: Callable[[bytes], list[tuple[str, dict]]] | None = None,
splitter: "langchain_core.documents.BaseDocumentTransformer | None" = None,
**kwargs,
):
"""
Initializes VectorStoreServer by using LangChain components.
Args:
- docs: pathway tables typically coming out of connectors which contain source documents
- embedder: Langchain component for embedding documents
- parser: callable that parses file contents into a list of documents
        - splitter: LangChain component for splitting documents into parts
"""
try:
from langchain_core.documents import Document
except ImportError:
raise ImportError(
"Please install langchain_core: `pip install langchain_core`"
)
generic_splitter = None
if splitter:
generic_splitter = lambda x: [ # noqa
(doc.page_content, doc.metadata)
for doc in splitter.transform_documents([Document(page_content=x)])
]
async def generic_embedded(x: str):
res = await embedder.aembed_documents([x])
return res[0]
return cls(
*docs,
embedder=generic_embedded,
parser=parser,
splitter=generic_splitter,
**kwargs,
)
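    # Editor's sketch (assumes the optional LangChain packages are installed; exact
    # import paths vary by version):
    #   from langchain_openai import OpenAIEmbeddings
    #   from langchain_text_splitters import CharacterTextSplitter
    #   server = VectorStoreServer.from_langchain_components(
    #       docs, embedder=OpenAIEmbeddings(), splitter=CharacterTextSplitter()
    #   )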
@classmethod
def from_llamaindex_components(
cls,
*docs,
transformations: list["llama_index.core.schema.TransformComponent"],
parser: Callable[[bytes], list[tuple[str, dict]]] | None = None,
**kwargs,
):
"""
Initializes VectorStoreServer by using LlamaIndex TransformComponents.
Args:
- docs: pathway tables typically coming out of connectors which contain source documents
- transformations: list of LlamaIndex components. The last component in this list
is required to inherit from LlamaIndex `BaseEmbedding`
- parser: callable that parses file contents into a list of documents
"""
try:
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.ingestion.pipeline import run_transformations
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
except ImportError:
raise ImportError(
"Please install llama-index-core: `pip install llama-index-core`"
)
try:
from llama_index.legacy.embeddings.base import (
BaseEmbedding as LegacyBaseEmbedding,
)
legacy_llama_index_not_imported = True
except ImportError:
legacy_llama_index_not_imported = False
def node_transformer(x: str) -> list[BaseNode]:
return [TextNode(text=x)]
def node_to_pathway(x: list[BaseNode]) -> list[tuple[str, dict]]:
return [
(node.get_content(metadata_mode=MetadataMode.NONE), node.extra_info)
for node in x
]
if transformations is None or not transformations:
raise ValueError("Transformations list cannot be None or empty.")
if not isinstance(transformations[-1], BaseEmbedding) and (
legacy_llama_index_not_imported
or not isinstance(transformations[-1], LegacyBaseEmbedding)
):
raise ValueError(
f"Last step of transformations should be an instance of {BaseEmbedding.__name__}, "
f"found {type(transformations[-1])}."
)
embedder: BaseEmbedding = transformations.pop()
async def embedding_callable(x: str) -> list[float]:
embedding = await embedder.aget_text_embedding(x)
return embedding
def generic_transformer(x: str) -> list[tuple[str, dict]]:
starting_node = node_transformer(x)
final_node = run_transformations(starting_node, transformations)
return node_to_pathway(final_node)
return VectorStoreServer(
*docs,
embedder=embedding_callable,
parser=parser,
splitter=generic_transformer,
**kwargs,
)
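    # Editor's sketch (assumes llama-index-core plus an embedding integration is
    # installed; exact import paths depend on the llama-index version):
    #   from llama_index.core.node_parser import TokenTextSplitter
    #   from llama_index.embeddings.openai import OpenAIEmbedding
    #   server = VectorStoreServer.from_llamaindex_components(
    #       docs, transformations=[TokenTextSplitter(), OpenAIEmbedding()]
    #   )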
def _build_graph(self) -> dict:
"""
Builds the pathway computation graph for indexing documents and serving queries.
"""
docs_s = self.docs
if not docs_s:
raise ValueError(
"""Please provide at least one data source, e.g. read files from disk:
pw.io.fs.read('./sample_docs', format='binary', mode='static', with_metadata=True)
"""
)
if len(docs_s) == 1:
(docs,) = docs_s
else:
docs: pw.Table = docs_s[0].concat_reindex(*docs_s[1:]) # type: ignore
@pw.udf
def parse_doc(data: bytes, metadata) -> list[pw.Json]:
rets = self.parser(data)
metadata = metadata.value
return [dict(text=ret[0], metadata={**metadata, **ret[1]}) for ret in rets] # type: ignore
parsed_docs = docs.select(data=parse_doc(docs.data, docs._metadata)).flatten(
pw.this.data
)
@pw.udf
def post_proc_docs(data_json: pw.Json) -> pw.Json:
data: dict = data_json.value # type:ignore
text = data["text"]
metadata = data["metadata"]
for processor in self.doc_post_processors:
text, metadata = processor(text, metadata)
return dict(text=text, metadata=metadata) # type: ignore
parsed_docs = parsed_docs.select(data=post_proc_docs(pw.this.data))
@pw.udf
def split_doc(data_json: pw.Json) -> list[pw.Json]:
data: dict = data_json.value # type:ignore
text = data["text"]
metadata = data["metadata"]
rets = self.splitter(text)
return [
dict(text=ret[0], metadata={**metadata, **ret[1]}) # type:ignore
for ret in rets
]
chunked_docs = parsed_docs.select(data=split_doc(pw.this.data)).flatten(
pw.this.data
)
if asyncio.iscoroutinefunction(self.embedder):
@pw.udf
async def embedder(txt):
result = await self.embedder(txt)
return np.asarray(result)
else:
@pw.udf
def embedder(txt):
result = self.embedder(txt)
return np.asarray(result)
chunked_docs += chunked_docs.select(
embedding=embedder(pw.this.data["text"].as_str())
)
knn_index = index.KNNIndex(
chunked_docs.embedding,
chunked_docs,
n_dimensions=self.embedding_dimension,
metadata=chunked_docs.data["metadata"],
**self.index_params, # type:ignore
)
parsed_docs += parsed_docs.select(
modified=pw.this.data["metadata"]["modified_at"].as_int(),
indexed=pw.this.data["metadata"]["seen_at"].as_int(),
path=pw.this.data["metadata"]["path"].as_str(),
)
stats = parsed_docs.reduce(
count=pw.reducers.count(),
last_modified=pw.reducers.max(pw.this.modified),
last_indexed=pw.reducers.max(pw.this.indexed),
paths=pw.reducers.tuple(pw.this.path),
)
return locals()
class StatisticsQuerySchema(pw.Schema):
pass
class QueryResultSchema(pw.Schema):
result: pw.Json
class InputResultSchema(pw.Schema):
result: list[pw.Json]
@pw.table_transformer
def statistics_query(
self, info_queries: pw.Table[StatisticsQuerySchema]
) -> pw.Table[QueryResultSchema]:
stats = self._graph["stats"]
# VectorStore statistics computation
@pw.udf
def format_stats(counts, last_modified, last_indexed) -> pw.Json:
if counts is not None:
response = {
"file_count": counts,
"last_modified": last_modified,
"last_indexed": last_indexed,
}
else:
response = {
"file_count": 0,
"last_modified": None,
"last_indexed": None,
}
return pw.Json(response)
info_results = info_queries.join_left(stats, id=info_queries.id).select(
result=format_stats(stats.count, stats.last_modified, stats.last_indexed)
)
return info_results
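    # Editor's sketch: run_server (below) exposes this transformer at /v1/statistics;
    # the JSON answer carries file_count, last_modified and last_indexed, e.g.
    #   curl -X POST http://127.0.0.1:8765/v1/statistics \
    #        -H 'Content-Type: application/json' -d '{}'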
class FilterSchema(pw.Schema):
metadata_filter: str | None = pw.column_definition(
default_value=None, description="Metadata filter in JMESPath format"
)
filepath_globpattern: str | None = pw.column_definition(
default_value=None, description="An optional Glob pattern for the file path"
)
InputsQuerySchema = FilterSchema
@staticmethod
def merge_filters(queries: pw.Table):
@pw.udf
def _get_jmespath_filter(
metadata_filter: str, filepath_globpattern: str
) -> str | None:
ret_parts = []
if metadata_filter:
ret_parts.append(f"({metadata_filter})")
if filepath_globpattern:
ret_parts.append(f'globmatch(`"{filepath_globpattern}"`, path)')
if ret_parts:
return " && ".join(ret_parts)
return None
queries = queries.without(
*VectorStoreServer.FilterSchema.__columns__.keys()
) + queries.select(
metadata_filter=_get_jmespath_filter(
pw.this.metadata_filter, pw.this.filepath_globpattern
)
)
return queries
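    # Editor's note (sketch): with metadata_filter='modified_at > `0`' and
    # filepath_globpattern='**/*.md', the merged expression produced above is
    #   (modified_at > `0`) && globmatch(`"**/*.md"`, path)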
@pw.table_transformer
def inputs_query(
self, input_queries: pw.Table[InputsQuerySchema] # type:ignore
) -> pw.Table[InputResultSchema]:
docs = self._graph["docs"]
        # TODO: compare this approach to first joining queries to documents, then filtering,
# then grouping to get each response.
# The "dumb" tuple approach has more work precomputed for an all inputs query
all_metas = docs.reduce(metadatas=pw.reducers.tuple(pw.this._metadata))
input_queries = self.merge_filters(input_queries)
@pw.udf
def format_inputs(
metadatas: list[pw.Json] | None, metadata_filter: str | None
) -> list[pw.Json]:
metadatas: list = metadatas if metadatas is not None else [] # type:ignore
assert metadatas is not None
if metadata_filter:
metadatas = [
m
for m in metadatas
if jmespath.search(
metadata_filter, m.value, options=_knn_lsh._glob_options
)
]
return metadatas
input_results = input_queries.join_left(all_metas, id=input_queries.id).select(
all_metas.metadatas, input_queries.metadata_filter
)
input_results = input_results.select(
result=format_inputs(pw.this.metadatas, pw.this.metadata_filter)
)
return input_results
class RetrieveQuerySchema(pw.Schema):
query: str = pw.column_definition(
description="Your query for the similarity search",
example="Pathway data processing framework",
)
k: int = pw.column_definition(
description="The number of documents to provide", example=2
)
metadata_filter: str | None = pw.column_definition(
default_value=None, description="Metadata filter in JMESPath format"
)
filepath_globpattern: str | None = pw.column_definition(
default_value=None, description="An optional Glob pattern for the file path"
)
@pw.table_transformer
def retrieve_query(
self, retrieval_queries: pw.Table[RetrieveQuerySchema]
) -> pw.Table[QueryResultSchema]:
embedder = self._graph["embedder"]
knn_index = self._graph["knn_index"]
# Relevant document search
retrieval_queries = self.merge_filters(retrieval_queries)
retrieval_queries += retrieval_queries.select(
embedding=embedder(pw.this.query),
)
retrieval_results = retrieval_queries + knn_index.get_nearest_items(
retrieval_queries.embedding,
k=pw.this.k,
collapse_rows=True,
metadata_filter=retrieval_queries.metadata_filter,
with_distances=True,
).select(
result=pw.this.data,
dist=pw.this.dist,
)
retrieval_results = retrieval_results.select(
result=pw.apply_with_type(
lambda x, y: pw.Json(
sorted(
[{**res.value, "dist": dist} for res, dist in zip(x, y)],
key=lambda x: x["dist"], # type: ignore
)
),
pw.Json,
pw.this.result,
pw.this.dist,
)
)
return retrieval_results
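    # Editor's sketch: run_server (below) exposes this transformer at /v1/retrieve;
    # host and port are placeholders:
    #   curl -X POST http://127.0.0.1:8765/v1/retrieve \
    #        -H 'Content-Type: application/json' \
    #        -d '{"query": "Pathway data processing framework", "k": 2}'
    # Each returned entry holds the chunk text, its metadata and a "dist" score.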
def run_server(
self,
host,
port,
threaded: bool = False,
with_cache: bool = True,
cache_backend: (
pw.persistence.Backend | None
) = pw.persistence.Backend.filesystem("./Cache"),
):
"""
Builds the document processing pipeline and runs it.
Args:
- host: host to bind the HTTP listener
- port: to bind the HTTP listener
- threaded: if True, run in a thread. Else block computation
- with_cache: if True, embedding requests for the same contents are cached
- cache_backend: the backend to use for caching if it is enabled. The
default is the disk cache, hosted locally in the folder ``./Cache``. You
can use ``Backend`` class of the
[`persistence API`](/developers/api-docs/persistence-api/#pathway.persistence.Backend)
to override it.
Returns:
If threaded, return the Thread object. Else, does not return.
"""
webserver = pw.io.http.PathwayWebserver(host=host, port=port, with_cors=True)
# TODO(move into webserver??)
def serve(route, schema, handler, documentation):
queries, writer = pw.io.http.rest_connector(
webserver=webserver,
route=route,
methods=("GET", "POST"),
schema=schema,
autocommit_duration_ms=50,
delete_completed_queries=True,
documentation=documentation,
)
writer(handler(queries))
serve(
"/v1/retrieve",
self.RetrieveQuerySchema,
self.retrieve_query,
pw.io.http.EndpointDocumentation(
summary="Do a similarity search for your query",
description="Request the given number of documents from the "
"realtime-maintained index.",
method_types=("GET",),
),
)
serve(
"/v1/statistics",
self.StatisticsQuerySchema,
self.statistics_query,
pw.io.http.EndpointDocumentation(
summary="Get current indexer stats",
description="Request for the basic stats of the indexer process. "
"It returns the number of documents that are currently present in the "
"indexer and the time the last of them was added.",
method_types=("GET",),
),
)
serve(
"/v1/inputs",
self.InputsQuerySchema,
self.inputs_query,
pw.io.http.EndpointDocumentation(
summary="Get indexed documents list",
description="Request for the list of documents present in the indexer. "
"It returns the list of metadata objects.",
method_types=("GET",),
),
)
def run():
if with_cache:
if cache_backend is None:
raise ValueError(
"Cache usage was requested but the backend is unspecified"
)
persistence_config = pw.persistence.Config.simple_config(
cache_backend,
persistence_mode=pw.PersistenceMode.UDF_CACHING,
)
else:
persistence_config = None
pw.run(
monitoring_level=pw.MonitoringLevel.NONE,
persistence_config=persistence_config,
)
if threaded:
t = threading.Thread(target=run, name="VectorStoreServer")
t.start()
return t
else:
run()
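# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming `server` is an instance of the vector-store server class defined above,
# it could be started in a background thread roughly like this:
#   thread = server.run_server(
#       host="127.0.0.1",
#       port=8666,
#       threaded=True,   # returns the Thread object instead of blocking
#       with_cache=True, # embedding calls are cached on disk under ./Cache by default
#   )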
class VectorStoreClient:
def __init__(
self,
host: str | None = None,
port: int | None = None,
url: str | None = None,
timeout: int = 15,
additional_headers: dict | None = None,
):
"""
A client you can use to query :py:class:`VectorStoreServer`.
Please provide either the `url`, or `host` and `port`.
Args:
        - host: host on which :py:class:`VectorStoreServer` listens
        - port: port on which :py:class:`VectorStoreServer` listens
        - url: url at which :py:class:`VectorStoreServer` listens
- timeout: timeout for the post requests in seconds
"""
err = "Either (`host` and `port`) or `url` must be provided, but not both."
if url is not None:
if host or port:
raise ValueError(err)
self.url = url
else:
if host is None:
raise ValueError(err)
port = port or 80
self.url = f"http://{host}:{port}"
self.timeout = timeout
self.additional_headers = additional_headers or {}
def query(
self, query: str, k: int = 3, metadata_filter: str | None = None
) -> list[dict]:
"""
Perform a query to the vector store and fetch results.
Args:
        - query: the text of the search query
- k: number of documents to be returned
- metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
"""
data = {"query": query, "k": k}
if metadata_filter is not None:
data["metadata_filter"] = metadata_filter
url = self.url + "/v1/retrieve"
response = requests.post(
url,
data=json.dumps(data),
headers=self._get_request_headers(),
timeout=self.timeout,
)
responses = response.json()
return sorted(responses, key=lambda x: x["dist"])
# Make an alias
__call__ = query
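    # --- Usage sketch (illustrative, not part of the original module) ---
    #   client = VectorStoreClient(host="127.0.0.1", port=8666)
    #   docs = client("Pathway data processing framework", k=2)  # __call__ aliases query()
    #   # an optional JMESPath filter restricts the search, e.g.:
    #   docs = client.query("indexing", k=3, metadata_filter="contains(path, 'notes')")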
def get_vectorstore_statistics(self):
"""Fetch basic statistics about the vector store."""
url = self.url + "/v1/statistics"
response = requests.post(
url,
json={},
headers=self._get_request_headers(),
timeout=self.timeout,
)
responses = response.json()
return responses
def get_input_files(
self,
metadata_filter: str | None = None,
filepath_globpattern: str | None = None,
):
"""
        Fetch information on documents in the vector store.
Args:
metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
filepath_globpattern: optional glob pattern specifying which documents
will be searched for this query.
"""
url = self.url + "/v1/inputs"
response = requests.post(
url,
json={
"metadata_filter": metadata_filter,
"filepath_globpattern": filepath_globpattern,
},
headers=self._get_request_headers(),
timeout=self.timeout,
)
responses = response.json()
return responses
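    # Example (illustrative): list only the PDF documents currently indexed
    #   client.get_input_files(filepath_globpattern="**/*.pdf")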
def _get_request_headers(self):
request_headers = {"Content-Type": "application/json"}
request_headers.update(self.additional_headers)
return request_headers
| [
"llama_index.core.ingestion.pipeline.run_transformations",
"llama_index.core.schema.TextNode"
] | [((1515, 1548), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (1542, 1548), False, 'import asyncio\n'), ((1111, 1138), 'asyncio.run', 'asyncio.run', (['self.coroutine'], {}), '(self.coroutine)\n', (1122, 1138), False, 'import asyncio\n'), ((1192, 1218), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1216, 1218), False, 'import asyncio\n'), ((1437, 1459), 'asyncio.run', 'asyncio.run', (['coroutine'], {}), '(coroutine)\n', (1448, 1459), False, 'import asyncio\n'), ((1560, 1581), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1575, 1581), False, 'import functools\n'), ((3406, 3474), 'logging.debug', 'logging.debug', (['"""Embedder has dimension %s"""', 'self.embedding_dimension'], {}), "('Embedder has dimension %s', self.embedding_dimension)\n", (3419, 3474), False, 'import logging\n'), ((10102, 10144), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['self.embedder'], {}), '(self.embedder)\n', (10129, 10144), False, 'import asyncio\n'), ((10588, 10750), 'pathway.stdlib.ml.index.KNNIndex', 'index.KNNIndex', (['chunked_docs.embedding', 'chunked_docs'], {'n_dimensions': 'self.embedding_dimension', 'metadata': "chunked_docs.data['metadata']"}), "(chunked_docs.embedding, chunked_docs, n_dimensions=self.\n embedding_dimension, metadata=chunked_docs.data['metadata'], **self.\n index_params)\n", (10602, 10750), False, 'from pathway.stdlib.ml import index\n'), ((12607, 12702), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""Metadata filter in JMESPath format"""'}), "(default_value=None, description=\n 'Metadata filter in JMESPath format')\n", (12627, 12702), True, 'import pathway as pw\n'), ((12763, 12866), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""An optional Glob pattern for the file path"""'}), "(default_value=None, description=\n 'An optional Glob pattern for the file path')\n", (12783, 12866), True, 'import pathway as pw\n'), ((15276, 15397), 'pathway.column_definition', 'pw.column_definition', ([], {'description': '"""Your query for the similarity search"""', 'example': '"""Pathway data processing framework"""'}), "(description='Your query for the similarity search',\n example='Pathway data processing framework')\n", (15296, 15397), True, 'import pathway as pw\n'), ((15446, 15531), 'pathway.column_definition', 'pw.column_definition', ([], {'description': '"""The number of documents to provide"""', 'example': '(2)'}), "(description='The number of documents to provide',\n example=2)\n", (15466, 15531), True, 'import pathway as pw\n'), ((15588, 15683), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""Metadata filter in JMESPath format"""'}), "(default_value=None, description=\n 'Metadata filter in JMESPath format')\n", (15608, 15683), True, 'import pathway as pw\n'), ((15744, 15847), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""An optional Glob pattern for the file path"""'}), "(default_value=None, description=\n 'An optional Glob pattern for the file path')\n", (15764, 15847), True, 'import pathway as pw\n'), ((17383, 17427), 'pathway.persistence.Backend.filesystem', 'pw.persistence.Backend.filesystem', (['"""./Cache"""'], {}), "('./Cache')\n", (17416, 17427), True, 'import pathway as pw\n'), ((18253, 18318), 'pathway.io.http.PathwayWebserver', 
'pw.io.http.PathwayWebserver', ([], {'host': 'host', 'port': 'port', 'with_cors': '(True)'}), '(host=host, port=port, with_cors=True)\n', (18280, 18318), True, 'import pathway as pw\n'), ((7862, 7913), 'llama_index.core.ingestion.pipeline.run_transformations', 'run_transformations', (['starting_node', 'transformations'], {}), '(starting_node, transformations)\n', (7881, 7913), False, 'from llama_index.core.ingestion.pipeline import run_transformations\n'), ((12309, 12326), 'pathway.Json', 'pw.Json', (['response'], {}), '(response)\n', (12316, 12326), True, 'import pathway as pw\n'), ((18446, 18640), 'pathway.io.http.rest_connector', 'pw.io.http.rest_connector', ([], {'webserver': 'webserver', 'route': 'route', 'methods': "('GET', 'POST')", 'schema': 'schema', 'autocommit_duration_ms': '(50)', 'delete_completed_queries': '(True)', 'documentation': 'documentation'}), "(webserver=webserver, route=route, methods=('GET',\n 'POST'), schema=schema, autocommit_duration_ms=50,\n delete_completed_queries=True, documentation=documentation)\n", (18471, 18640), True, 'import pathway as pw\n'), ((18924, 19132), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Do a similarity search for your query"""', 'description': '"""Request the given number of documents from the realtime-maintained index."""', 'method_types': "('GET',)"}), "(summary=\n 'Do a similarity search for your query', description=\n 'Request the given number of documents from the realtime-maintained index.'\n , method_types=('GET',))\n", (18956, 19132), True, 'import pathway as pw\n'), ((19343, 19634), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Get current indexer stats"""', 'description': '"""Request for the basic stats of the indexer process. It returns the number of documents that are currently present in the indexer and the time the last of them was added."""', 'method_types': "('GET',)"}), "(summary='Get current indexer stats',\n description=\n 'Request for the basic stats of the indexer process. It returns the number of documents that are currently present in the indexer and the time the last of them was added.'\n , method_types=('GET',))\n", (19375, 19634), True, 'import pathway as pw\n'), ((19853, 20074), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Get indexed documents list"""', 'description': '"""Request for the list of documents present in the indexer. It returns the list of metadata objects."""', 'method_types': "('GET',)"}), "(summary='Get indexed documents list',\n description=\n 'Request for the list of documents present in the indexer. 
It returns the list of metadata objects.'\n , method_types=('GET',))\n", (19885, 20074), True, 'import pathway as pw\n'), ((20655, 20747), 'pathway.run', 'pw.run', ([], {'monitoring_level': 'pw.MonitoringLevel.NONE', 'persistence_config': 'persistence_config'}), '(monitoring_level=pw.MonitoringLevel.NONE, persistence_config=\n persistence_config)\n', (20661, 20747), True, 'import pathway as pw\n'), ((20828, 20882), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'name': '"""VectorStoreServer"""'}), "(target=run, name='VectorStoreServer')\n", (20844, 20882), False, 'import threading\n'), ((6736, 6752), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'x'}), '(text=x)\n', (6744, 6752), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((10277, 10295), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (10287, 10295), True, 'import numpy as np\n'), ((10430, 10448), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (10440, 10448), True, 'import numpy as np\n'), ((11133, 11152), 'pathway.reducers.count', 'pw.reducers.count', ([], {}), '()\n', (11150, 11152), True, 'import pathway as pw\n'), ((11180, 11213), 'pathway.reducers.max', 'pw.reducers.max', (['pw.this.modified'], {}), '(pw.this.modified)\n', (11195, 11213), True, 'import pathway as pw\n'), ((11240, 11272), 'pathway.reducers.max', 'pw.reducers.max', (['pw.this.indexed'], {}), '(pw.this.indexed)\n', (11255, 11272), True, 'import pathway as pw\n'), ((11292, 11323), 'pathway.reducers.tuple', 'pw.reducers.tuple', (['pw.this.path'], {}), '(pw.this.path)\n', (11309, 11323), True, 'import pathway as pw\n'), ((14203, 14239), 'pathway.reducers.tuple', 'pw.reducers.tuple', (['pw.this._metadata'], {}), '(pw.this._metadata)\n', (14220, 14239), True, 'import pathway as pw\n'), ((20423, 20527), 'pathway.persistence.Config.simple_config', 'pw.persistence.Config.simple_config', (['cache_backend'], {'persistence_mode': 'pw.PersistenceMode.UDF_CACHING'}), '(cache_backend, persistence_mode=pw.\n PersistenceMode.UDF_CACHING)\n', (20458, 20527), True, 'import pathway as pw\n'), ((22847, 22863), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (22857, 22863), False, 'import json\n'), ((14720, 14793), 'jmespath.search', 'jmespath.search', (['metadata_filter', 'm.value'], {'options': '_knn_lsh._glob_options'}), '(metadata_filter, m.value, options=_knn_lsh._glob_options)\n', (14735, 14793), False, 'import jmespath\n'), ((4913, 4937), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'x'}), '(page_content=x)\n', (4921, 4937), False, 'from langchain_core.documents import Document\n')] |
import logging
import llama_index
import graphsignal
from graphsignal.recorders.base_recorder import BaseRecorder
from graphsignal.recorders.instrumentation import parse_semver, compare_semver
from graphsignal.proto import signals_pb2
from graphsignal.recorders.instrumentation import patch_method
logger = logging.getLogger('graphsignal')
class LlamaIndexRecorder(BaseRecorder):
def __init__(self):
self._library_version = None
self._v1_handler = None
def setup(self):
if not graphsignal._tracer.auto_instrument:
return
version = ''
if hasattr(llama_index, '__version__') and llama_index.__version__:
version = llama_index.__version__
self._library_version = version
def is_v1():
return (
hasattr(llama_index, 'indices') and hasattr(llama_index.indices.service_context, 'ServiceContext')
)
def is_v2():
return self._library_version and compare_semver(self._library_version, (0, 10, 10)) >= 0
if is_v2():
# the handler should be added manually for now
pass
elif is_v1():
from graphsignal.callbacks.llama_index.v1 import GraphsignalCallbackHandler
from llama_index.indices.service_context import ServiceContext
def after_from_defaults(args, kwargs, ret, exc, context):
if isinstance(ret, ServiceContext):
if not any(isinstance(handler, GraphsignalCallbackHandler) for handler in ret.callback_manager.handlers):
ret.callback_manager.add_handler(GraphsignalCallbackHandler())
else:
logger.error(f'Cannot add callback for LlamaIndex {version}')
if not patch_method(ServiceContext, 'from_defaults', after_func=after_from_defaults):
logger.error(f'Cannot instrument LlamaIndex {version}')
else:
logger.error(f'Cannot auto-instrument LlamaIndex {version}')
def shutdown(self):
if self._v1_handler:
llama_index.callbacks.get_callback_manager().remove_handler(self._v1_handler)
self._v1_handler = None
def on_span_read(self, span, context):
if self._library_version:
entry = span.config.add()
entry.key = 'llama_index'
entry.value = self._library_version
| [
"llama_index.callbacks.get_callback_manager"
] | [((309, 341), 'logging.getLogger', 'logging.getLogger', (['"""graphsignal"""'], {}), "('graphsignal')\n", (326, 341), False, 'import logging\n'), ((997, 1047), 'graphsignal.recorders.instrumentation.compare_semver', 'compare_semver', (['self._library_version', '(0, 10, 10)'], {}), '(self._library_version, (0, 10, 10))\n', (1011, 1047), False, 'from graphsignal.recorders.instrumentation import parse_semver, compare_semver\n'), ((1793, 1870), 'graphsignal.recorders.instrumentation.patch_method', 'patch_method', (['ServiceContext', '"""from_defaults"""'], {'after_func': 'after_from_defaults'}), "(ServiceContext, 'from_defaults', after_func=after_from_defaults)\n", (1805, 1870), False, 'from graphsignal.recorders.instrumentation import patch_method\n'), ((2097, 2141), 'llama_index.callbacks.get_callback_manager', 'llama_index.callbacks.get_callback_manager', ([], {}), '()\n', (2139, 2141), False, 'import llama_index\n'), ((1640, 1668), 'graphsignal.callbacks.llama_index.v1.GraphsignalCallbackHandler', 'GraphsignalCallbackHandler', ([], {}), '()\n', (1666, 1668), False, 'from graphsignal.callbacks.llama_index.v1 import GraphsignalCallbackHandler\n')] |
import llama_index
from llama_index.readers.file import CSVReader
from llama_index.readers.file import DocxReader
from llama_index.readers.file import EpubReader
from llama_index.readers.file import FlatReader
from llama_index.readers.file import HTMLTagReader
from llama_index.readers.file import HWPReader
from llama_index.readers.file import IPYNBReader
from llama_index.readers.file import ImageCaptionReader
from llama_index.readers.file import ImageReader
from llama_index.readers.file import ImageTabularChartReader
from llama_index.readers.file import ImageVisionLLMReader
from llama_index.readers.file import MarkdownReader
from llama_index.readers.file import MboxReader
from llama_index.readers.file import PDFReader
from llama_index.readers.file import PagedCSVReader
from llama_index.readers.file import PandasCSVReader
from llama_index.readers.file import PptxReader
from llama_index.readers.file import PyMuPDFReader
from llama_index.readers.file import RTFReader
from llama_index.readers.file import UnstructuredReader
from llama_index.readers.file import VideoAudioReader
from llama_index.readers.file import XMLReader
from llama_index.readers.chroma import ChromaReader
from llama_index.readers.web import AsyncWebPageReader
from llama_index.readers.web import BeautifulSoupWebReader
from llama_index.readers.web import KnowledgeBaseWebReader
from llama_index.readers.web import MainContentExtractorReader
from llama_index.readers.web import NewsArticleReader
from llama_index.readers.web import ReadabilityWebPageReader
from llama_index.readers.web import RssNewsReader
from llama_index.readers.web import RssReader
from llama_index.readers.web import SimpleWebPageReader
from llama_index.readers.web import SitemapReader
from llama_index.readers.web import TrafilaturaWebReader
from llama_index.readers.web import UnstructuredURLLoader
from llama_index.readers.web import WholeSiteReader
from langchain_core.documents.base import Document
####LlamaParse
import llama_parse
from llama_parse import LlamaParse
from llama_index.core import SimpleDirectoryReader
import random
from typing import List, Optional
from pydantic import BaseModel
import dspy
import gradio as gr
from dspy.retrieve.chromadb_rm import ChromadbRM
from dspy.evaluate import Evaluate
from dspy.datasets.hotpotqa import HotPotQA
from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune
from dsp.modules.lm import LM
from dsp.utils.utils import deduplicate
import os
import dotenv
from dotenv import load_dotenv, set_key
from pathlib import Path
from typing import Any, List, Dict
import base64
from io import BytesIO  # needed by to_gradio_chatbot() further down
import logging
import chromadb
# Define constants and configurations
NUM_THREADS = 4 # Example constant, adjust according to your actual configuration
RECOMPILE_INTO_MODEL_FROM_SCRATCH = False # Example flag
logger = logging.getLogger(__name__)  # used by generate_prompt_with_history() further down
# ## LOADING DATA
# %load_ext autoreload
# %autoreload 2
# %set_env CUDA_VISIBLE_DEVICES=7
# import sys; sys.path.append('/future/u/okhattab/repos/public/stanfordnlp/dspy')
# Assume all necessary imports for llama_index readers are correctly done at the beginning
ports = [7140, 7141, 7142, 7143, 7144, 7145]
#llamaChat = dspy.HFClientTGI(model="meta-llama/Llama-2-13b-chat-hf", port=ports, max_tokens=150) (DELETED)
# colbertv2 = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
class APIKeyManager:
@staticmethod
def set_api_keys(anthropic_api_key: str, openai_api_key: str):
"""
Function to securely set API keys by updating the .env file in the application's directory.
This approach ensures that sensitive information is not hard-coded into the application.
"""
print("Setting API keys...")
# Define the path to the .env file
env_path = Path('.') / '.env'
print(f"Loading existing .env file from: {env_path}")
# Load existing .env file or create one if it doesn't exist
load_dotenv(dotenv_path=env_path, override=True)
print("Updating .env file with new API keys...")
# Update the .env file with the new values
set_key(env_path, "ANTHROPIC_API_KEY", anthropic_api_key)
set_key(env_path, "OPENAI_API_KEY", openai_api_key)
print("API keys updated successfully.")
# Returns a confirmation without exposing the keys
return "API keys updated successfully in .env file. Please proceed with your operations."
@staticmethod
def load_api_keys_and_prompts():
"""
Loads API keys and prompts from an existing .env file into the application's environment.
This function assumes the .env file is located in the same directory as the script.
"""
print("Loading API keys and prompts...")
# Define the path to the .env file
env_path = Path('.') / '.env'
print(f"Loading .env file from: {env_path}")
# Load the .env file
load_dotenv(dotenv_path=env_path)
print("Accessing variables from the environment...")
# Access the variables from the environment
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
field_prompt = os.getenv("FIELDPROMPT")
example_prompt = os.getenv("EXAMPLEPROMPT")
description_prompt = os.getenv("DESCRIPTIONPROMPT")
print("API keys and prompts loaded successfully.")
# Optionally, print a confirmation or return the loaded values
return {
"ANTHROPIC_API_KEY": anthropic_api_key,
"OPENAI_API_KEY": openai_api_key,
"FIELDPROMPT": field_prompt,
"EXAMPLEPROMPT": example_prompt,
"DESCRIPTIONPROMPT": description_prompt
}
class DataProcessor:
def __init__(self, source_file: str, collection_name: str, persist_directory: str):
self.source_file = source_file
self.collection_name = collection_name
self.persist_directory = persist_directory
def load_data_from_source_and_store(self) -> Any:
# def load_data_from_source_and_store(source: Union[str, dict], collection_name: str, persist_directory: str) -> Any:
"""
Loads data from various sources and stores the data in ChromaDB.
:param source: A string representing a file path or a URL, or a dictionary specifying web content to fetch.
:param collection_name: Name of the ChromaDB collection to store the data.
:param persist_directory: Path to the directory where ChromaDB data will be persisted.
:return: Loaded data.
"""
# Determine the file extension
if isinstance(self.source_file, str):
ext = os.path.splitext(self.source_file)[-1].lower()
else:
raise TypeError("Source must be a string (file path or URL).")
# Load data using appropriate reader
if ext == '.csv':
reader = CSVReader()
elif ext == '.docx':
reader = DocxReader()
elif ext == '.epub':
reader = EpubReader()
elif ext == '.html':
reader = HTMLTagReader()
elif ext == '.hwp':
reader = HWPReader()
elif ext == '.ipynb':
reader = IPYNBReader()
elif ext in ['.png', '.jpg', '.jpeg']:
reader = ImageReader() # Assuming ImageReader can handle common image formats
elif ext == '.md':
reader = MarkdownReader()
elif ext == '.mbox':
reader = MboxReader()
elif ext == '.pdf':
reader = PDFReader()
elif ext == '.pptx':
reader = PptxReader()
elif ext == '.rtf':
reader = RTFReader()
elif ext == '.xml':
reader = XMLReader()
elif self.source_file.startswith('http'):
reader = AsyncWebPageReader() # Simplified assumption for URLs
else:
raise ValueError(f"Unsupported source type: {self.source_file}")
# Use the reader to load data
# data = reader.read(self.source_file) # Adjust method name as necessary
data = reader.load_data(self.source_file) # Adjust method name as necessary
chroma_client = chromadb.Client()
collection = chroma_client.create_collection(name=self.collection_name)
collection.add(
documents=[i.text for i in data], # the text fields
metadatas=[i.extra_info for i in data], # the metadata
ids=[i.doc_id for i in data], # the generated ids
)
# Store the data in ChromaDB
# retriever_model = ChromadbRM(self.collection_name, self.persist_directory)
# retriever_model(data)
return data
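# --- Usage sketch (illustrative, not part of the original module; the path is hypothetical) ---
#   processor = DataProcessor(source_file="docs/manual.pdf",
#                             collection_name="adapt-a-rag",
#                             persist_directory="your_files_here")
#   loaded = processor.load_data_from_source_and_store()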
def choose_reader(full_path: str):
    """
    Chooses an appropriate reader for the given file path or URL and returns it.
    :param full_path: A string representing a file path or a URL.
    """
# Determine the file extension
if isinstance(full_path, str):
ext = os.path.splitext(full_path)[-1].lower()
else:
raise TypeError("Source must be a string (file path or URL).")
# Load data using appropriate reader
if ext == '.csv':
reader = CSVReader()
elif ext == '.docx':
reader = DocxReader()
elif ext == '.epub':
reader = EpubReader()
elif ext == '.html':
reader = HTMLTagReader()
elif ext == '.hwp':
reader = HWPReader()
elif ext == '.ipynb':
reader = IPYNBReader()
elif ext in ['.png', '.jpg', '.jpeg']:
reader = ImageReader() # Assuming ImageReader can handle common image formats
elif ext == '.md':
reader = MarkdownReader()
elif ext == '.mbox':
reader = MboxReader()
elif ext == '.pdf':
reader = PDFReader()
elif ext == '.pptx':
reader = PptxReader()
elif ext == '.rtf':
reader = RTFReader()
elif ext == '.xml':
reader = XMLReader()
elif full_path.startswith('http'):
reader = AsyncWebPageReader() # Simplified assumption for URLs
else:
raise ValueError(f"Unsupported source type: {full_path}")
    # Return the chosen reader; callers such as DocumentLoader decide how to load data with it.
    return reader
class DocumentLoader:
@staticmethod
    def load_documents_from_folder(folder_path: str = "./add_your_files_here") -> List[Document]:
        """Loads documents from files within the specified folder (defaults to ./add_your_files_here)."""
        documents = []
        for root, _, filenames in os.walk(folder_path):
            for filename in filenames:
                full_path = os.path.join(root, filename)
                reader = choose_reader(full_path)
if reader:
print(f"Loading document from '{filename}' with {type(reader).__name__}")
try:
docs = list(reader.load_data(input_files=[full_path]))
documents.extend(docs)
except Exception as e:
print(f"Failed to load document from '{filename}'. Error: {e}")
# Convert to langchain format
documents = [ doc.to_langchain_format()
for doc in documents
]
return documents
### DSPY DATA GENERATOR
# class descriptionSignature(dspy.Signature):
# load_dotenv()
# field_prompt = os.getenv('FIELDPROMPT', 'Default field prompt if not set')
# example_prompt = os.getenv('EXAMPLEPROMPT', 'Default example prompt if not set')
# description_prompt = os.getenv('DESCRIPTIONPROMPT', 'Default description prompt if not set')
# field_name = dspy.InputField(desc=field_prompt)
# example = dspy.InputField(desc=example_prompt)
# description = dspy.OutputField(desc=description_prompt)
load_dotenv()
# https://github.com/stanfordnlp/dspy?tab=readme-ov-file#4-two-powerful-concepts-signatures--teleprompters
class DescriptionSignature(dspy.Signature):
"""Write a simple search query that will help answer a complex question."""
context = dspy.InputField(desc="may contain relevant facts")
question = dspy.InputField()
query = dspy.OutputField()
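# Illustrative use of the signature above with dspy.Predict (assumes an LM has already
# been configured via dspy.settings.configure, as done elsewhere in this module):
#   predictor = dspy.Predict(DescriptionSignature)
#   prediction = predictor(context="Pathway is a data processing framework.",
#                          question="What does Pathway do?")
#   print(prediction.query)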
class SyntheticDataGenerator:
def __init__(self, schema_class: Optional[BaseModel] = None, examples: Optional[List[dspy.Example]] = None):
self.schema_class = schema_class
self.examples = examples
print("SyntheticDataGenerator initialized.")
def generate(self, sample_size: int) -> List[dspy.Example]:
print(f"Starting data generation for sample size: {sample_size}")
if not self.schema_class and not self.examples:
raise ValueError("Either a schema_class or examples must be provided.")
if self.examples and len(self.examples) >= sample_size:
print("No additional data generation needed.")
return self.examples[:sample_size]
additional_samples_needed = sample_size - (len(self.examples) if self.examples else 0)
print(f"Generating {additional_samples_needed} additional samples.")
generated_examples = self._generate_additional_examples(additional_samples_needed)
return self.examples + generated_examples if self.examples else generated_examples
def _define_or_infer_fields(self):
print("Defining or inferring fields for data generation.")
if self.schema_class:
data_schema = self.schema_class.model_json_schema()
properties = data_schema['properties']
elif self.examples:
inferred_schema = self.examples[0].__dict__['_store']
descriptor = dspy.Predict(DescriptionSignature)
properties = {field: {'description': str((descriptor(field_name=field, example=str(inferred_schema[field]))).description)}
for field in inferred_schema.keys()}
else:
properties = {}
return properties
def _generate_additional_examples(self, additional_samples_needed: int) -> List[dspy.Example]:
print(f"Generating {additional_samples_needed} additional examples.")
properties = self._define_or_infer_fields()
class_name = f"{self.schema_class.__name__ if self.schema_class else 'Inferred'}Signature"
fields = self._prepare_fields(properties)
signature_class = type(class_name, (dspy.Signature,), fields)
generator = dspy.Predict(signature_class, n=additional_samples_needed)
response = generator(sindex=str(random.randint(1, additional_samples_needed)))
return [dspy.Example({field_name: getattr(completion, field_name) for field_name in properties.keys()})
for completion in response.completions]
def _prepare_fields(self, properties) -> dict:
print("Preparing fields for the signature class.")
return {
'__doc__': f"Generates the following outputs: {{{', '.join(properties.keys())}}}.",
'sindex': dspy.InputField(desc="a random string"),
**{field_name: dspy.OutputField(desc=properties[field_name].get('description', 'No description'))
for field_name in properties.keys()},
}
# # Generating synthetic data via existing examples
# generator = SyntheticDataGenerator(examples=existing_examples)
# dataframe = generator.generate(sample_size=5)
class ClaudeModelManager:
def __init__(self, model: str = "claude-3-opus-20240229", api_key: Optional[str] = None, api_base: Optional[str] = None):
self.model = model
self.api_key = api_key
self.api_base = api_base
self.initialize_claude()
def initialize_claude(self):
"""Wrapper around anthropic's API. Supports both the Anthropic and Azure APIs."""
def __init__(
self,
model: str = "claude-3-opus-20240229",
api_key: Optional[str] = None,
api_base: Optional[str] = None,
**kwargs,
):
print("Initializing Claude...")
super().__init__(model)
try:
from anthropic import Anthropic, RateLimitError
print("Successfully imported Anthropics's API client.")
except ImportError as err:
print("Failed to import Anthropics's API client.")
raise ImportError("Claude requires `pip install anthropic`.") from err
self.provider = "anthropic"
self.api_key = os.environ.get("ANTHROPIC_API_KEY") if api_key is None else api_key
if self.api_key:
print("API key is set.")
else:
print("API key is not set. Please ensure it's provided or set in the environment variables.")
self.api_base = BASE_URL if api_base is None else api_base
print(f"API base URL is set to: {self.api_base}")
self.kwargs = {
"temperature": 0.0 if "temperature" not in kwargs else kwargs["temperature"],
"max_tokens": min(kwargs.get("max_tokens", 4096), 4096),
"top_p": 1.0 if "top_p" not in kwargs else kwargs["top_p"],
"top_k": 1 if "top_k" not in kwargs else kwargs["top_k"],
"n": kwargs.pop("n", kwargs.pop("num_generations", 1)),
**kwargs,
}
self.kwargs["model"] = model
print(f"Model parameters set: {self.kwargs}")
# self.history: List[dict[str, Any]] = []
self.history = [] # changed to be commatible with older versions
self.client = Anthropic(api_key=self.api_key)
print("Anthropic client initialized.")
class SyntheticDataHandler:
def __init__(self, examples: Optional[List[dspy.Example]] = None):
self.generator = SyntheticDataGenerator(examples=examples)
def generate_data(self, sample_size: int):
return self.generator.generate(sample_size=sample_size)
class ClaudeModelConfig:
def __init__(self, model_name):
self.model = model_name
def get_model(self):
return Claude(model=self.model)
def configure_dspy_settings(lm_model):
dspy.settings.configure(rm=colbertv2, lm=lm_model)
class DatasetPreparation:
@staticmethod
def prepare_datasets(dataset):
trainset = [x.with_inputs('question') for x in dataset.train]
devset = [x.with_inputs('question') for x in dataset.dev]
testset = [x.with_inputs('question') for x in dataset.test]
return trainset, devset, testset
# class BasicMH(dspy.Module):
# def __init__(self, claude_model, passages_per_hop=3):
# super().__init__()
# self.claude_model = claude_model
# self.passages_per_hop = passages_per_hop
# def forward(self, question):
# context = []
# for hop in range(2):
# search_results = self.claude_model.search(question, context=context, k=self.passages_per_hop)
# passages = [result.passage for result in search_results]
# context = self.deduplicate(context + passages)
# answer = self.claude_model.generate(context=context, question=question)
# return answer
# @staticmethod
# def deduplicate(passages):
# return list(dict.fromkeys(passages))
class ModelCompilationAndEnsemble:
@staticmethod
def compile_or_load_models(recompile, trainset, num_models=4):
ensemble = []
if recompile:
metric_EM = dspy.evaluate.answer_exact_match
tp = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_threads=NUM_THREADS)
claude_bs = tp.compile(Claude(), trainset=trainset[:50], valset=trainset[50:200])
ensemble = [prog for *_, prog in claude_bs.candidate_programs[:num_models]]
else:
for idx in range(num_models):
claude_model = Claude(model=f'multihop_claude3opus_{idx}.json')
ensemble.append(claude_model)
return ensemble
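# Illustrative call (assumes `trainset` was produced by DatasetPreparation above):
#   ensemble = ModelCompilationAndEnsemble.compile_or_load_models(
#       RECOMPILE_INTO_MODEL_FROM_SCRATCH, trainset, num_models=4)
#   claude_program = ensemble[0]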
# # # Instantiate Claude with desired parameters
# # claude_model = Claude(model="claude-3-opus-20240229")
# # # Configure dspy settings with Claude as the language model
# # dspy.settings.configure(rm=colbertv2, lm=claude_model)
# # #dspy.settings.configure(rm=colbertv2, lm=llamaChat) #Llama change into model based on line 166
# # dataset = dataframe
# # trainset = [x.with_inputs('question') for x in dataset.train]
# # devset = [x.with_inputs('question') for x in dataset.dev]
# # testset = [x.with_inputs('question') for x in dataset.test]
# # #len(trainset), len(devset), len(testset)
# # #trainset[0]
# class BasicMH(dspy.Module):
# def __init__(self, claude_model, passages_per_hop=3):
# super().__init__()
# self.claude_model = claude_model
# self.passages_per_hop = passages_per_hop
# def forward(self, question):
# context = []
# for hop in range(2):
# # Retrieval using Claude model
# search_results = self.claude_model.search(question, context=context, k=self.passages_per_hop)
# passages = [result.passage for result in search_results]
# context = deduplicate(context + passages)
# # Generation using Claude model
# answer = self.claude_model.generate(context=context, question=question)
# return answer
# metric_EM = dspy.evaluate.answer_exact_match
# if RECOMPILE_INTO_MODEL_FROM_SCRATCH:
# tp = BootstrapFewShotWithRandomSearch(metric=metric_EM, max_bootstrapped_demos=2, num_threads=NUM_THREADS)
# # Compile the Claude model using BootstrapFewShotWithRandomSearch
# claude_bs = tp.compile(Claude(), trainset=trainset[:50], valset=trainset[50:200])
# # Get the compiled programs
# ensemble = [prog for *_, prog in claude_bs.candidate_programs[:4]]
# for idx, prog in enumerate(ensemble):
# # Save the compiled Claude models if needed
# # prog.save(f'multihop_llama213b_{idx}.json')
# pass
# else:
# ensemble = []
# for idx in range(4):
# # Load the previously trained Claude models
# claude_model = Claude(model=f'multihop_claude3opus_{idx}.json') #need to prepare this .json file
# ensemble.append(claude_model)
# # Select the first Claude model from the ensemble
# claude_program = ensemble[0]
# Add this class definition to your app.py
class ChatbotManager:
def __init__(self):
self.models = self.load_models()
self.history = []
def load_models(self):
pass
# return models
def generate_response(self, text, image, model_select_dropdown, top_p, temperature, repetition_penalty, max_length_tokens, max_context_length_tokens):
return gradio_chatbot_output, self.history, "Generate: Success"
def generate_prompt_with_history( text, history, max_length=2048):
"""
Generate a prompt with history for the deepseek application.
Args:
text (str): The text prompt.
history (list): List of previous conversation messages.
max_length (int): The maximum length of the prompt.
Returns:
tuple: A tuple containing the generated prompt, conversation, and conversation copy. If the prompt could not be generated within the max_length limit, returns None.
"""
user_role_ind = 0
bot_role_ind = 1
# Initialize conversation
conversation = ""# ADD DSPY HERE vl_chat_processor.new_chat_template()
if history:
conversation.messages = history
# if image is not None:
# if "<image_placeholder>" not in text:
# text = (
# "<image_placeholder>" + "\n" + text
# ) # append the <image_placeholder> in a new line after the text prompt
# text = (text, image)
conversation.append_message(conversation.roles[user_role_ind], text)
conversation.append_message(conversation.roles[bot_role_ind], "")
# Create a copy of the conversation to avoid history truncation in the UI
conversation_copy = conversation.copy()
logger.info("=" * 80)
logger.info(get_prompt(conversation))
rounds = len(conversation.messages) // 2
for _ in range(rounds):
current_prompt = get_prompt(conversation)
# current_prompt = (
# current_prompt.replace("</s>", "")
# if sft_format == "deepseek"
# else current_prompt
# )
# if current_prompt.count("<image_placeholder>") > 2:
# for _ in range(len(conversation_copy.messages) - 2):
# conversation_copy.messages.pop(0)
# return conversation_copy
# if torch.tensor(tokenizer.encode(current_prompt)).size(-1) <= max_length:
# return conversation_copy
if len(conversation.messages) % 2 != 0:
gr.Error("The messages between user and assistant are not paired.")
return
try:
for _ in range(2): # pop out two messages in a row
conversation.messages.pop(0)
except IndexError:
gr.Error("Input text processing failed, unable to respond in this round.")
return None
gr.Error("Prompt could not be generated within max_length limit.")
return None
def to_gradio_chatbot(conv):
"""Convert the conversation to gradio chatbot format."""
ret = []
for i, (role, msg) in enumerate(conv.messages[conv.offset :]):
if i % 2 == 0:
if type(msg) is tuple:
msg, image = msg
msg = msg
if isinstance(image, str):
with open(image, "rb") as f:
data = f.read()
img_b64_str = base64.b64encode(data).decode()
image_str = f'<video src="data:video/mp4;base64,{img_b64_str}" controls width="426" height="240"></video>'
msg = msg.replace("\n".join(["<image_placeholder>"] * 4), image_str)
else:
max_hw, min_hw = max(image.size), min(image.size)
aspect_ratio = max_hw / min_hw
max_len, min_len = 800, 400
shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
longest_edge = int(shortest_edge * aspect_ratio)
W, H = image.size
if H > W:
H, W = longest_edge, shortest_edge
else:
H, W = shortest_edge, longest_edge
image = image.resize((W, H))
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_b64_str = base64.b64encode(buffered.getvalue()).decode()
img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
msg = msg.replace("<image_placeholder>", img_str)
ret.append([msg, None])
else:
ret[-1][-1] = msg
return ret
def to_gradio_history(conv):
"""Convert the conversation to gradio history state."""
return conv.messages[conv.offset :]
def get_prompt(conv) -> str:
"""Get the prompt for generation."""
system_prompt = conv.system_template.format(system_message=conv.system_message)
if conv.sep_style == SeparatorStyle.DeepSeek:
seps = [conv.sep, conv.sep2]
if system_prompt == "" or system_prompt is None:
ret = ""
else:
ret = system_prompt + seps[0]
for i, (role, message) in enumerate(conv.messages):
if message:
if type(message) is tuple: # multimodal message
message, _ = message
ret += role + ": " + message + seps[i % 2]
else:
ret += role + ":"
return ret
else:
return conv.get_prompt
def predict(text, chatbot, history, top_p, temperature, repetition_penalty, max_length_tokens, max_context_length_tokens, model_select_dropdown,):
"""
Function to predict the response based on the user's input and selected model.
Parameters:
user_text (str): The input text from the user.
user_image (str): The input image from the user.
chatbot (str): The chatbot's name.
history (str): The history of the chat.
top_p (float): The top-p parameter for the model.
temperature (float): The temperature parameter for the model.
max_length_tokens (int): The maximum length of tokens for the model.
max_context_length_tokens (int): The maximum length of context tokens for the model.
model_select_dropdown (str): The selected model from the dropdown.
Returns:
generator: A generator that yields the chatbot outputs, history, and status.
"""
print("running the prediction function")
# try:
# tokenizer, vl_gpt, vl_chat_processor = models[model_select_dropdown]
# if text == "":
# yield chatbot, history, "Empty context."
# return
# except KeyError:
# yield [[text, "No Model Found"]], [], "No Model Found"
# return
    conversation = generate_prompt_with_history(
        text,
        history,
        max_length=max_context_length_tokens,
    )
prompts = convert_conversation_to_prompts(conversation)
gradio_chatbot_output = to_gradio_chatbot(conversation)
# full_response = ""
# with torch.no_grad():
# for x in deepseek_generate(
# prompts=prompts,
# vl_gpt=vl_gpt,
# vl_chat_processor=vl_chat_processor,
# tokenizer=tokenizer,
# stop_words=stop_words,
# max_length=max_length_tokens,
# temperature=temperature,
# repetition_penalty=repetition_penalty,
# top_p=top_p,
# ):
# full_response += x
# response = strip_stop_words(full_response, stop_words)
# conversation.update_last_message(response)
# gradio_chatbot_output[-1][1] = response
# yield gradio_chatbot_output, to_gradio_history(
# conversation
# ),
"Generating..."
print("flushed result to gradio")
# torch.cuda.empty_cache()
# if is_variable_assigned("x"):
# print(f"{model_select_dropdown}:\n{text}\n{'-' * 80}\n{x}\n{'=' * 80}")
# print(
# f"temperature: {temperature}, top_p: {top_p}, repetition_penalty: {repetition_penalty}, max_length_tokens: {max_length_tokens}"
# )
yield gradio_chatbot_output, to_gradio_history(conversation), "Generate: Success"
def retry(
text,
image,
chatbot,
history,
top_p,
temperature,
repetition_penalty,
max_length_tokens,
max_context_length_tokens,
model_select_dropdown,
):
if len(history) == 0:
yield (chatbot, history, "Empty context")
return
chatbot.pop()
history.pop()
text = history.pop()[-1]
if type(text) is tuple:
text, image = text
yield from predict(
text,
chatbot,
history,
top_p,
temperature,
repetition_penalty,
max_length_tokens,
max_context_length_tokens,
model_select_dropdown,
)
class Application:
def __init__(self):
self.api_key_manager = APIKeyManager()
# self.data_processor = DataProcessor(source_file="", collection_name="adapt-a-rag", persist_directory="/your_files_here")
self.data_processor = DataProcessor(source_file="", collection_name="adapt-a-rag", persist_directory="your_files_here")
self.claude_model_manager = ClaudeModelManager()
self.synthetic_data_handler = SyntheticDataHandler()
self.chatbot_manager = ChatbotManager()
def set_api_keys(self, anthropic_api_key, openai_api_key):
return self.api_key_manager.set_api_keys(anthropic_api_key, openai_api_key)
def handle_file_upload(self, uploaded_file):
self.data_processor.source_file = uploaded_file.name
loaded_data = self.data_processor.load_data_from_source_and_store()
print("Data from {uploaded_file.name} loaded and stored successfully.")
return loaded_data
def handle_synthetic_data(self, schema_class_name, sample_size):
synthetic_data = self.synthetic_data_handler.generate_data(sample_size=int(sample_size))
synthetic_data_str = "\n".join([str(data) for data in synthetic_data])
print ("Generated {sample_size} synthetic data items:\n{synthetic_data_str}")
return synthetic_data
def handle_chatbot_interaction(self, text, model_select, top_p, temperature, repetition_penalty, max_length_tokens, max_context_length_tokens):
chatbot_response, history, status = self.chatbot_manager.generate_response(text, None, model_select, top_p, temperature, repetition_penalty, max_length_tokens, max_context_length_tokens)
return chatbot_response
def main(self):
with gr.Blocks() as demo:
with gr.Accordion("API Keys", open=True) as api_keys_accordion:
with gr.Row():
anthropic_api_key_input = gr.Textbox(label="Anthropic API Key", type="password")
openai_api_key_input = gr.Textbox(label="OpenAI API Key", type="password")
submit_button = gr.Button("Submit")
confirmation_output = gr.Textbox(label="Confirmation", visible=False)
submit_button.click(
fn=self.set_api_keys,
inputs=[anthropic_api_key_input, openai_api_key_input],
outputs=confirmation_output
)
with gr.Accordion("Upload Data") as upload_data_accordion:
file_upload = gr.File(label="Upload Data File")
file_upload_button = gr.Button("Process Uploaded File")
file_upload_output = gr.Textbox()
file_upload_button.click(
fn=self.handle_file_upload,
inputs=[file_upload],
outputs=file_upload_output
)
with gr.Accordion("Generate Synthetic Data") as generate_data_accordion:
schema_input = gr.Textbox(label="Schema Class Name")
sample_size_input = gr.Number(label="Sample Size", value=100)
synthetic_data_button = gr.Button("Generate Synthetic Data")
synthetic_data_output = gr.Textbox()
synthetic_data_button.click(
fn=self.handle_synthetic_data,
inputs=[schema_input, sample_size_input],
outputs=synthetic_data_output
)
with gr.Accordion("Chatbot") as chatbot_accordion:
text_input = gr.Textbox(label="Enter your question")
# model_select = gr.Dropdown(label="Select Model", choices=list(self.chatbot_manager.models.keys()))
model_select = gr.Dropdown(label="Select Model", choices=[ClaudeModelManager(api_key=os.getenv("ANTHROPIC_API_KEY"))])
top_p_input = gr.Slider(label="Top-p", minimum=0.0, maximum=1.0, value=0.95, step=0.01)
# top_p_input = gr.Slider()
temperature_input = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.01)
repetition_penalty_input = gr.Slider(label="Repetition Penalty", minimum=1.0, maximum=2.0, value=1.1, step=0.1)
max_length_tokens_input = gr.Number(label="Max Length Tokens", value=2048)
max_context_length_tokens_input = gr.Number(label="Max Context Length Tokens", value=2048)
chatbot_output = gr.Chatbot(label="Chatbot Conversation")
submit_button = gr.Button("Submit")
submit_button.click(
fn=self.handle_chatbot_interaction,
inputs=[text_input, model_select, top_p_input, temperature_input, repetition_penalty_input, max_length_tokens_input, max_context_length_tokens_input],
outputs=chatbot_output
)
demo.launch()
if __name__ == "__main__":
app = Application()
app.main()
# Example usage
# source_file = "example.txt" # Replace with your source file path
# collection_name = "adapt-a-rag" #Need to be defined
# persist_directory = "/your_files_here" #Need to be defined
# loaded_data = load_data_from_source_and_store(source_file, collection_name="adapt-a-rag", persist_directory="/your_files_here")
# print("Data loaded and stored successfully in ChromaDB.")
| [
"llama_index.readers.file.HTMLTagReader",
"llama_index.readers.file.IPYNBReader",
"llama_index.readers.file.PptxReader",
"llama_index.readers.file.HWPReader",
"llama_index.readers.file.ImageReader",
"llama_index.readers.web.AsyncWebPageReader",
"llama_index.readers.file.XMLReader",
"llama_index.readers.file.MboxReader",
"llama_index.readers.file.PDFReader",
"llama_index.readers.file.RTFReader",
"llama_index.readers.file.CSVReader",
"llama_index.readers.file.DocxReader",
"llama_index.readers.file.EpubReader",
"llama_index.readers.file.MarkdownReader"
] | [((12016, 12029), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (12027, 12029), False, 'from dotenv import load_dotenv, set_key\n'), ((12277, 12327), 'dspy.InputField', 'dspy.InputField', ([], {'desc': '"""may contain relevant facts"""'}), "(desc='may contain relevant facts')\n", (12292, 12327), False, 'import dspy\n'), ((12343, 12360), 'dspy.InputField', 'dspy.InputField', ([], {}), '()\n', (12358, 12360), False, 'import dspy\n'), ((12373, 12391), 'dspy.OutputField', 'dspy.OutputField', ([], {}), '()\n', (12389, 12391), False, 'import dspy\n'), ((18385, 18435), 'dspy.settings.configure', 'dspy.settings.configure', ([], {'rm': 'colbertv2', 'lm': 'lm_model'}), '(rm=colbertv2, lm=lm_model)\n', (18408, 18435), False, 'import dspy\n'), ((3909, 3957), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path', 'override': '(True)'}), '(dotenv_path=env_path, override=True)\n', (3920, 3957), False, 'from dotenv import load_dotenv, set_key\n'), ((4083, 4140), 'dotenv.set_key', 'set_key', (['env_path', '"""ANTHROPIC_API_KEY"""', 'anthropic_api_key'], {}), "(env_path, 'ANTHROPIC_API_KEY', anthropic_api_key)\n", (4090, 4140), False, 'from dotenv import load_dotenv, set_key\n'), ((4149, 4200), 'dotenv.set_key', 'set_key', (['env_path', '"""OPENAI_API_KEY"""', 'openai_api_key'], {}), "(env_path, 'OPENAI_API_KEY', openai_api_key)\n", (4156, 4200), False, 'from dotenv import load_dotenv, set_key\n'), ((4914, 4947), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path'}), '(dotenv_path=env_path)\n', (4925, 4947), False, 'from dotenv import load_dotenv, set_key\n'), ((5098, 5128), 'os.getenv', 'os.getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (5107, 5128), False, 'import os\n'), ((5154, 5181), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (5163, 5181), False, 'import os\n'), ((5205, 5229), 'os.getenv', 'os.getenv', (['"""FIELDPROMPT"""'], {}), "('FIELDPROMPT')\n", (5214, 5229), False, 'import os\n'), ((5255, 5281), 'os.getenv', 'os.getenv', (['"""EXAMPLEPROMPT"""'], {}), "('EXAMPLEPROMPT')\n", (5264, 5281), False, 'import os\n'), ((5311, 5341), 'os.getenv', 'os.getenv', (['"""DESCRIPTIONPROMPT"""'], {}), "('DESCRIPTIONPROMPT')\n", (5320, 5341), False, 'import os\n'), ((8220, 8237), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (8235, 8237), False, 'import chromadb\n'), ((9287, 9298), 'llama_index.readers.file.CSVReader', 'CSVReader', ([], {}), '()\n', (9296, 9298), False, 'from llama_index.readers.file import CSVReader\n'), ((10638, 10658), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (10645, 10658), False, 'import os\n'), ((14611, 14669), 'dspy.Predict', 'dspy.Predict', (['signature_class'], {'n': 'additional_samples_needed'}), '(signature_class, n=additional_samples_needed)\n', (14623, 14669), False, 'import dspy\n'), ((25630, 25696), 'gradio.Error', 'gr.Error', (['"""Prompt could not be generated within max_length limit."""'], {}), "('Prompt could not be generated within max_length limit.')\n", (25638, 25696), True, 'import gradio as gr\n'), ((3743, 3752), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (3747, 3752), False, 'from pathlib import Path\n'), ((4796, 4805), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (4800, 4805), False, 'from pathlib import Path\n'), ((6922, 6933), 'llama_index.readers.file.CSVReader', 'CSVReader', ([], {}), '()\n', (6931, 6933), False, 'from llama_index.readers.file import CSVReader\n'), ((9341, 9353), 
'llama_index.readers.file.DocxReader', 'DocxReader', ([], {}), '()\n', (9351, 9353), False, 'from llama_index.readers.file import DocxReader\n'), ((15172, 15211), 'dspy.InputField', 'dspy.InputField', ([], {'desc': '"""a random string"""'}), "(desc='a random string')\n", (15187, 15211), False, 'import dspy\n'), ((17818, 17849), 'anthropic.Anthropic', 'Anthropic', ([], {'api_key': 'self.api_key'}), '(api_key=self.api_key)\n', (17827, 17849), False, 'from anthropic import Anthropic, RateLimitError\n'), ((19755, 19860), 'dspy.teleprompt.BootstrapFewShotWithRandomSearch', 'BootstrapFewShotWithRandomSearch', ([], {'metric': 'metric_EM', 'max_bootstrapped_demos': '(2)', 'num_threads': 'NUM_THREADS'}), '(metric=metric_EM, max_bootstrapped_demos=2,\n num_threads=NUM_THREADS)\n', (19787, 19860), False, 'from dspy.teleprompt import BootstrapFewShotWithRandomSearch, BootstrapFinetune\n'), ((34073, 34084), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (34082, 34084), True, 'import gradio as gr\n'), ((6984, 6996), 'llama_index.readers.file.DocxReader', 'DocxReader', ([], {}), '()\n', (6994, 6996), False, 'from llama_index.readers.file import DocxReader\n'), ((9396, 9408), 'llama_index.readers.file.EpubReader', 'EpubReader', ([], {}), '()\n', (9406, 9408), False, 'from llama_index.readers.file import EpubReader\n'), ((10727, 10755), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (10739, 10755), False, 'import os\n'), ((13840, 13874), 'dspy.Predict', 'dspy.Predict', (['DescriptionSignature'], {}), '(DescriptionSignature)\n', (13852, 13874), False, 'import dspy\n'), ((16691, 16726), 'os.environ.get', 'os.environ.get', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (16705, 16726), False, 'import os\n'), ((25245, 25312), 'gradio.Error', 'gr.Error', (['"""The messages between user and assistant are not paired."""'], {}), "('The messages between user and assistant are not paired.')\n", (25253, 25312), True, 'import gradio as gr\n'), ((34111, 34146), 'gradio.Accordion', 'gr.Accordion', (['"""API Keys"""'], {'open': '(True)'}), "('API Keys', open=True)\n", (34123, 34146), True, 'import gradio as gr\n'), ((34429, 34448), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (34438, 34448), True, 'import gradio as gr\n'), ((34487, 34534), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Confirmation"""', 'visible': '(False)'}), "(label='Confirmation', visible=False)\n", (34497, 34534), True, 'import gradio as gr\n'), ((34775, 34802), 'gradio.Accordion', 'gr.Accordion', (['"""Upload Data"""'], {}), "('Upload Data')\n", (34787, 34802), True, 'import gradio as gr\n'), ((34859, 34892), 'gradio.File', 'gr.File', ([], {'label': '"""Upload Data File"""'}), "(label='Upload Data File')\n", (34866, 34892), True, 'import gradio as gr\n'), ((34930, 34964), 'gradio.Button', 'gr.Button', (['"""Process Uploaded File"""'], {}), "('Process Uploaded File')\n", (34939, 34964), True, 'import gradio as gr\n'), ((35002, 35014), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (35012, 35014), True, 'import gradio as gr\n'), ((35231, 35270), 'gradio.Accordion', 'gr.Accordion', (['"""Generate Synthetic Data"""'], {}), "('Generate Synthetic Data')\n", (35243, 35270), True, 'import gradio as gr\n'), ((35330, 35367), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Schema Class Name"""'}), "(label='Schema Class Name')\n", (35340, 35367), True, 'import gradio as gr\n'), ((35404, 35445), 'gradio.Number', 'gr.Number', ([], {'label': '"""Sample Size"""', 'value': 
'(100)'}), "(label='Sample Size', value=100)\n", (35413, 35445), True, 'import gradio as gr\n'), ((35486, 35522), 'gradio.Button', 'gr.Button', (['"""Generate Synthetic Data"""'], {}), "('Generate Synthetic Data')\n", (35495, 35522), True, 'import gradio as gr\n'), ((35563, 35575), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (35573, 35575), True, 'import gradio as gr\n'), ((35821, 35844), 'gradio.Accordion', 'gr.Accordion', (['"""Chatbot"""'], {}), "('Chatbot')\n", (35833, 35844), True, 'import gradio as gr\n'), ((35896, 35935), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Enter your question"""'}), "(label='Enter your question')\n", (35906, 35935), True, 'import gradio as gr\n'), ((36218, 36291), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Top-p"""', 'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.95)', 'step': '(0.01)'}), "(label='Top-p', minimum=0.0, maximum=1.0, value=0.95, step=0.01)\n", (36227, 36291), True, 'import gradio as gr\n'), ((36372, 36450), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Temperature"""', 'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.01)'}), "(label='Temperature', minimum=0.0, maximum=1.0, value=0.7, step=0.01)\n", (36381, 36450), True, 'import gradio as gr\n'), ((36494, 36582), 'gradio.Slider', 'gr.Slider', ([], {'label': '"""Repetition Penalty"""', 'minimum': '(1.0)', 'maximum': '(2.0)', 'value': '(1.1)', 'step': '(0.1)'}), "(label='Repetition Penalty', minimum=1.0, maximum=2.0, value=1.1,\n step=0.1)\n", (36503, 36582), True, 'import gradio as gr\n'), ((36621, 36669), 'gradio.Number', 'gr.Number', ([], {'label': '"""Max Length Tokens"""', 'value': '(2048)'}), "(label='Max Length Tokens', value=2048)\n", (36630, 36669), True, 'import gradio as gr\n'), ((36720, 36776), 'gradio.Number', 'gr.Number', ([], {'label': '"""Max Context Length Tokens"""', 'value': '(2048)'}), "(label='Max Context Length Tokens', value=2048)\n", (36729, 36776), True, 'import gradio as gr\n'), ((36810, 36850), 'gradio.Chatbot', 'gr.Chatbot', ([], {'label': '"""Chatbot Conversation"""'}), "(label='Chatbot Conversation')\n", (36820, 36850), True, 'import gradio as gr\n'), ((36883, 36902), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (36892, 36902), True, 'import gradio as gr\n'), ((7047, 7059), 'llama_index.readers.file.EpubReader', 'EpubReader', ([], {}), '()\n', (7057, 7059), False, 'from llama_index.readers.file import EpubReader\n'), ((9081, 9108), 'os.path.splitext', 'os.path.splitext', (['full_path'], {}), '(full_path)\n', (9097, 9108), False, 'import os\n'), ((9451, 9466), 'llama_index.readers.file.HTMLTagReader', 'HTMLTagReader', ([], {}), '()\n', (9464, 9466), False, 'from llama_index.readers.file import HTMLTagReader\n'), ((14710, 14754), 'random.randint', 'random.randint', (['(1)', 'additional_samples_needed'], {}), '(1, additional_samples_needed)\n', (14724, 14754), False, 'import random\n'), ((25518, 25592), 'gradio.Error', 'gr.Error', (['"""Input text processing failed, unable to respond in this round."""'], {}), "('Input text processing failed, unable to respond in this round.')\n", (25526, 25592), True, 'import gradio as gr\n'), ((34191, 34199), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (34197, 34199), True, 'import gradio as gr\n'), ((34247, 34301), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""Anthropic API Key"""', 'type': '"""password"""'}), "(label='Anthropic API Key', type='password')\n", (34257, 34301), True, 'import gradio as gr\n'), ((34345, 34396), 'gradio.Textbox', 'gr.Textbox', ([], 
{'label': '"""OpenAI API Key"""', 'type': '"""password"""'}), "(label='OpenAI API Key', type='password')\n", (34355, 34396), True, 'import gradio as gr\n'), ((6693, 6727), 'os.path.splitext', 'os.path.splitext', (['self.source_file'], {}), '(self.source_file)\n', (6709, 6727), False, 'import os\n'), ((7110, 7125), 'llama_index.readers.file.HTMLTagReader', 'HTMLTagReader', ([], {}), '()\n', (7123, 7125), False, 'from llama_index.readers.file import HTMLTagReader\n'), ((9508, 9519), 'llama_index.readers.file.HWPReader', 'HWPReader', ([], {}), '()\n', (9517, 9519), False, 'from llama_index.readers.file import HWPReader\n'), ((7175, 7186), 'llama_index.readers.file.HWPReader', 'HWPReader', ([], {}), '()\n', (7184, 7186), False, 'from llama_index.readers.file import HWPReader\n'), ((9563, 9576), 'llama_index.readers.file.IPYNBReader', 'IPYNBReader', ([], {}), '()\n', (9574, 9576), False, 'from llama_index.readers.file import IPYNBReader\n'), ((7238, 7251), 'llama_index.readers.file.IPYNBReader', 'IPYNBReader', ([], {}), '()\n', (7249, 7251), False, 'from llama_index.readers.file import IPYNBReader\n'), ((9637, 9650), 'llama_index.readers.file.ImageReader', 'ImageReader', ([], {}), '()\n', (9648, 9650), False, 'from llama_index.readers.file import ImageReader\n'), ((26219, 26241), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (26235, 26241), False, 'import base64\n'), ((7320, 7333), 'llama_index.readers.file.ImageReader', 'ImageReader', ([], {}), '()\n', (7331, 7333), False, 'from llama_index.readers.file import ImageReader\n'), ((9747, 9763), 'llama_index.readers.file.MarkdownReader', 'MarkdownReader', ([], {}), '()\n', (9761, 9763), False, 'from llama_index.readers.file import MarkdownReader\n'), ((36154, 36184), 'os.getenv', 'os.getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (36163, 36184), False, 'import os\n'), ((7438, 7454), 'llama_index.readers.file.MarkdownReader', 'MarkdownReader', ([], {}), '()\n', (7452, 7454), False, 'from llama_index.readers.file import MarkdownReader\n'), ((9806, 9818), 'llama_index.readers.file.MboxReader', 'MboxReader', ([], {}), '()\n', (9816, 9818), False, 'from llama_index.readers.file import MboxReader\n'), ((7505, 7517), 'llama_index.readers.file.MboxReader', 'MboxReader', ([], {}), '()\n', (7515, 7517), False, 'from llama_index.readers.file import MboxReader\n'), ((9860, 9871), 'llama_index.readers.file.PDFReader', 'PDFReader', ([], {}), '()\n', (9869, 9871), False, 'from llama_index.readers.file import PDFReader\n'), ((7567, 7578), 'llama_index.readers.file.PDFReader', 'PDFReader', ([], {}), '()\n', (7576, 7578), False, 'from llama_index.readers.file import PDFReader\n'), ((9914, 9926), 'llama_index.readers.file.PptxReader', 'PptxReader', ([], {}), '()\n', (9924, 9926), False, 'from llama_index.readers.file import PptxReader\n'), ((7629, 7641), 'llama_index.readers.file.PptxReader', 'PptxReader', ([], {}), '()\n', (7639, 7641), False, 'from llama_index.readers.file import PptxReader\n'), ((9968, 9979), 'llama_index.readers.file.RTFReader', 'RTFReader', ([], {}), '()\n', (9977, 9979), False, 'from llama_index.readers.file import RTFReader\n'), ((7691, 7702), 'llama_index.readers.file.RTFReader', 'RTFReader', ([], {}), '()\n', (7700, 7702), False, 'from llama_index.readers.file import RTFReader\n'), ((10021, 10032), 'llama_index.readers.file.XMLReader', 'XMLReader', ([], {}), '()\n', (10030, 10032), False, 'from llama_index.readers.file import XMLReader\n'), ((7752, 7763), 'llama_index.readers.file.XMLReader', 
'XMLReader', ([], {}), '()\n', (7761, 7763), False, 'from llama_index.readers.file import XMLReader\n'), ((10089, 10109), 'llama_index.readers.web.AsyncWebPageReader', 'AsyncWebPageReader', ([], {}), '()\n', (10107, 10109), False, 'from llama_index.readers.web import AsyncWebPageReader\n'), ((7835, 7855), 'llama_index.readers.web.AsyncWebPageReader', 'AsyncWebPageReader', ([], {}), '()\n', (7853, 7855), False, 'from llama_index.readers.web import AsyncWebPageReader\n')] |
# Imports
from collections import defaultdict
from time import sleep
from llama_index import (
StorageContext,
load_index_from_storage,
set_global_service_context,
)
from model_context import get_anyscale_context
from templates import custom_template, yn_template
import csv
from tqdm import tqdm
from openai import OpenAI
client = OpenAI(base_url="https://api.endpoints.anyscale.com/v1", api_key="KEY")
# DEBUG LOGS
# import llama_index
# llama_index.set_global_handler("simple")
rag = True
yn = False
if rag:
# Select Model
print("Loading model context...")
service_context = get_anyscale_context()
set_global_service_context(service_context)
# Load embedded data for RAG
print("Loading RAG embeddings...")
storage_context = StorageContext.from_defaults(persist_dir="vector-db-all")
index = load_index_from_storage(
service_context=service_context, storage_context=storage_context
)
# Assemble Query Engine
top_k = 3
if yn:
query_engine = index.as_query_engine(
text_qa_template=yn_template,
similarity_top_k=top_k,
# verbose=True,
# streaming=True,
)
else:
query_engine = index.as_query_engine(
text_qa_template=custom_template,
similarity_top_k=top_k,
# verbose=True,
# streaming=True,
)
def query_baseline(text: str, yn: bool) -> str:
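    """Query the non-RAG baseline (Llama-2-7b-chat via the Anyscale endpoint), retrying until the request succeeds."""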
while True:
if yn:
content_msg = "Answer with yes/no and an explanation."
else:
content_msg = "Express whether the statement is true or false and explain why." #Your job is to
try:
chat_completion = client.chat.completions.create(
model="meta-llama/Llama-2-7b-chat-hf",
messages=[
{
"role": "system",
"content": content_msg,
},
{
"role": "user",
"content": text,
},
],
temperature=0,
)
return chat_completion.choices[0].message.content.strip()
        except Exception:
print("BROKE: ", text)
sleep(10)
# Load evaluation data
print("Loading evaluation data...")
labeled_data = defaultdict(list)
with open("../neg-exemplars-raw/exceptions.onlyValid.csv", "r") as full_data:
data_reader = csv.DictReader(full_data)
for sample in data_reader:
labeled_data[sample["generic_new"]].append(sample["exemplar"])
print(f"{len(labeled_data)} generics loaded!")
generics = list(labeled_data.keys())
# Evaluation Loop
print("Beginning evaluation:")
tie = 0
loss = 0
win = 0
with open(f"all_answers_{'rag' if rag else 'base'}_{'yn' if yn else 'tf'}.txt", 'w') as ans_file:
for i in tqdm(range(1000), desc="Generic evaluation process"):
sample = generics[i]
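        # Test both the universal ("All ...") and negated ("Not all ...") form of each generic statement.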
for ext in ["All", "Not all"]:
prompt = ext.lower() + " " + sample
if yn:
prompt = "Is it true that " + prompt[:-1].lower() + "?" #investigate
if rag:
response = query_engine.query(prompt)
else:
response = query_baseline(prompt, yn)
# Record answer
ans_file.write("INDEX: " + str(i) + '\n')
ans_file.write("BASE INPUT: " + prompt + '\n')
ans_file.write("RESPONSE: " + '\n' + str(response) + '\n\n')
if yn:
process = str(response).lower()
false_count = process.count("no") - process.count("not") - process.count("now") - process.count("noc") - process.count("nor") - process.count("non") - process.count("nou")
true_count = str(response).lower().count("yes") - str(response).lower().count("eyes")
else:
false_count = str(response).lower().count("false")
true_count = str(response).lower().count("true")
# print(false_count)
# print(true_count)
if ext == "All":
good = false_count
bad = true_count
elif ext == "Not all":
good = true_count
bad = false_count
ans_file.write("RESULT: ")
if good > bad:
win += 1
ans_file.write("WIN")
elif bad > good:
loss += 1
ans_file.write("LOSS")
else:
tie += 1
ans_file.write("TIE")
ans_file.write('\n\n-------------------\n\n')
print("Wins: ", win)
print("Ties: ", tie)
print("Loss: ", loss)
| [
"llama_index.load_index_from_storage",
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults"
] | [((345, 416), 'openai.OpenAI', 'OpenAI', ([], {'base_url': '"""https://api.endpoints.anyscale.com/v1"""', 'api_key': '"""KEY"""'}), "(base_url='https://api.endpoints.anyscale.com/v1', api_key='KEY')\n", (351, 416), False, 'from openai import OpenAI\n'), ((2366, 2383), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2377, 2383), False, 'from collections import defaultdict\n'), ((606, 628), 'model_context.get_anyscale_context', 'get_anyscale_context', ([], {}), '()\n', (626, 628), False, 'from model_context import get_anyscale_context\n'), ((633, 676), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (659, 676), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((772, 829), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""vector-db-all"""'}), "(persist_dir='vector-db-all')\n", (800, 829), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((842, 936), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(service_context=service_context, storage_context=\n storage_context)\n', (865, 936), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((2480, 2505), 'csv.DictReader', 'csv.DictReader', (['full_data'], {}), '(full_data)\n', (2494, 2505), False, 'import csv\n'), ((2281, 2290), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (2286, 2290), False, 'from time import sleep\n')] |
import os
import glob
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.anthropic import Anthropic
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
# MODEL = "claude-3-opus-20240229"
# MODEL = "claude-3-sonnet-20240229"
MODEL = "claude-3-haiku-20240307"
DATA_DIR = "data"
SUMMARY_ROOT = "summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Anthropic(model=MODEL, max_tokens=1024)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
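# TreeSummarize condenses the ticket comments bottom-up into a single summary using the Anthropic model above.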
SUMMARY_PROMPT = "The following text is a series of messages from a PaperCut support ticket. Summarise the whole conversation, including a list of participants and who they work for, the problem or problems, the key events and the dates they occurred, and the current status of the ticket. Include any log lines from the messages."
def summariseTicket(ticketNumber):
"Summarizes the Zendesk ticket with the given `ticketNumber` and returns the summary text."
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
return summarizer.get_response(SUMMARY_PROMPT, texts)
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f"Skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.llms.anthropic.Anthropic",
"llama_index.core.response_synthesizers.TreeSummarize"
] | [((470, 509), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (481, 509), False, 'import os\n'), ((1440, 1479), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (1449, 1479), False, 'from llama_index.llms.anthropic import Anthropic\n'), ((1498, 1556), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (1526, 1556), False, 'from llama_index.core import ServiceContext\n'), ((1570, 1631), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (1583, 1631), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((791, 827), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (803, 827), False, 'import os\n'), ((1027, 1075), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1039, 1075), False, 'import os\n'), ((1373, 1387), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1385, 1387), False, 'from datetime import datetime\n'), ((2149, 2195), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (2170, 2195), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((2796, 2807), 'time.time', 'time.time', ([], {}), '()\n', (2805, 2807), False, 'import time\n'), ((417, 450), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (429, 450), False, 'import os\n'), ((3412, 3423), 'time.time', 'time.time', ([], {}), '()\n', (3421, 3423), False, 'import time\n'), ((4065, 4076), 'time.time', 'time.time', ([], {}), '()\n', (4074, 4076), False, 'import time\n'), ((856, 888), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (868, 888), False, 'import os\n'), ((2422, 2444), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2438, 2444), False, 'import os\n'), ((3491, 3502), 'time.time', 'time.time', ([], {}), '()\n', (3500, 3502), False, 'import time\n'), ((1193, 1214), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1208, 1214), False, 'import os\n'), ((2467, 2494), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (2479, 2494), False, 'import os\n')] |
import streamlit as st
import os
import openai
import llama_index
from llama_index.llms import OpenAI
from llama_index.indices.composability import ComposableGraph
from llama_index.storage import StorageContext
from llama_index import TreeIndex, SummaryIndex
from llama_index.indices.loading import load_graph_from_storage
openai.api_key= st.secrets['OPENAI_API_KEY']
st.set_page_config(page_title="Chat with AAPL 23 10-Qs, powered by Munger", page_icon=":chart_with_upwards_trend:", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title("Chat with Munger :chart_with_upwards_trend: :eyeglasses:")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about Apple's 2023 financial documents!"}
]
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text="Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes."):
# Create a storage context using the persist directory
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# Load the graph from the storage context
graph = load_graph_from_storage(storage_context, root_id="APPL-23")
query_engine = graph.as_query_engine(child_branch_factor=1)
return query_engine
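# Build the query engine once; @st.cache_resource keeps it cached across Streamlit reruns.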
query_engine = load_data()
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = query_engine.query(prompt)
#ipdb.set_trace()
st.write(response.response)
#st.code(response.get_formatted_sources())
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.indices.loading.load_graph_from_storage",
"llama_index.storage.StorageContext.from_defaults"
] | [((520, 709), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with AAPL 23 10-Qs, powered by Munger"""', 'page_icon': '""":chart_with_upwards_trend:"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Chat with AAPL 23 10-Qs, powered by Munger',\n page_icon=':chart_with_upwards_trend:', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (538, 709), True, 'import streamlit as st\n'), ((702, 770), 'streamlit.title', 'st.title', (['"""Chat with Munger :chart_with_upwards_trend: :eyeglasses:"""'], {}), "('Chat with Munger :chart_with_upwards_trend: :eyeglasses:')\n", (710, 770), True, 'import streamlit as st\n'), ((1002, 1039), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1019, 1039), True, 'import streamlit as st\n'), ((793, 816), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (814, 816), True, 'import streamlit as st\n'), ((1593, 1623), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (1606, 1623), True, 'import streamlit as st\n'), ((1678, 1747), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1710, 1747), True, 'import streamlit as st\n'), ((1066, 1183), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes.'\n )\n", (1076, 1183), True, 'import streamlit as st\n'), ((1265, 1318), 'llama_index.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1293, 1318), False, 'from llama_index.storage import StorageContext\n'), ((1386, 1445), 'llama_index.indices.loading.load_graph_from_storage', 'load_graph_from_storage', (['storage_context'], {'root_id': '"""APPL-23"""'}), "(storage_context, root_id='APPL-23')\n", (1409, 1445), False, 'from llama_index.indices.loading import load_graph_from_storage\n'), ((1834, 1866), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1849, 1866), True, 'import streamlit as st\n'), ((1876, 1904), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1884, 1904), True, 'import streamlit as st\n'), ((2045, 2073), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2060, 2073), True, 'import streamlit as st\n'), ((2088, 2113), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2098, 2113), True, 'import streamlit as st\n'), ((2207, 2234), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2215, 2234), True, 'import streamlit as st\n'), ((2376, 2417), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2408, 2417), True, 'import streamlit as st\n')] |
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks import LlamaDebugHandler
from llama_index.callbacks.wandb import WandbCallbackHandler
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
SimpleKeywordTableIndex,
StorageContext,
)
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-4", temperature=0)
import llama_index.core
from llama_index.core import set_global_handler
set_global_handler("wandb", run_args={"project": "llamaindex"})
wandb_callback = llama_index.core.global_handler
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
run_args = dict(
project="llamaindex",
)
wandb_callback = WandbCallbackHandler(run_args=run_args)
Settings.callback_manager = CallbackManager([llama_debug, wandb_callback])
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
docs = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(docs)
wandb_callback.persist_index(index, index_name="simple_vector_store")
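# persist_index stores the index as a Weights & Biases artifact; it is re-loaded from its artifact URL below.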
from llama_index.core import load_index_from_storage
storage_context = wandb_callback.load_storage_context(
artifact_url="ayut/llamaindex/simple_vector_store:v0"
)
index = load_index_from_storage(
storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response, sep="\n")
wandb_callback.finish() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.openai.OpenAI",
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.load_index_from_storage",
"llama_index.core.set_global_handler",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.callbacks.LlamaDebugHandler"
] | [((926, 962), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (932, 962), False, 'from llama_index.llms.openai import OpenAI\n'), ((1040, 1103), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args': "{'project': 'llamaindex'}"}), "('wandb', run_args={'project': 'llamaindex'})\n", (1058, 1103), False, 'from llama_index.core import set_global_handler\n'), ((1170, 1212), 'llama_index.core.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (1187, 1212), False, 'from llama_index.core.callbacks import LlamaDebugHandler\n'), ((1277, 1316), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {'run_args': 'run_args'}), '(run_args=run_args)\n', (1297, 1316), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1346, 1392), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug, wandb_callback]'], {}), '([llama_debug, wandb_callback])\n', (1361, 1392), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((1716, 1753), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (1747, 1753), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n'), ((2008, 2048), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2031, 2048), False, 'from llama_index.core import load_index_from_storage\n'), ((190, 217), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (199, 217), False, 'import os\n'), ((262, 357), 'getpass.getpass', 'getpass', (['"""Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n"""'], {}), "(\n 'Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n'\n )\n", (269, 357), False, 'from getpass import getpass\n'), ((380, 411), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (389, 411), False, 'import os\n'), ((1649, 1693), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), "('./data/paul_graham/')\n", (1670, 1693), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n')] |
from langfuse import Langfuse
from llama_index.llms.openai import OpenAI
import llama_index.core
llama_index.core.set_global_handler("langfuse")
from llama_index.core.llms import ChatMessage
import json
from json_repair import repair_json
import os
langfuse = Langfuse()
prompt = langfuse.get_prompt("extraction-prompt-1")
with open("../data.json", "r") as f:
input_text = f.read()
data = json.loads(input_text)
model = OpenAI(model="gpt-4-turbo-preview")
# model = OpenAI()
for chlen in data:
# skip if file exists
if os.path.exists(f'../terms-output/{chlen["article_num"]}.json'):
print(f"chlen {chlen['article_num']} already exists")
continue
try:
compiled_prompt = prompt.compile(input=chlen['text'])
generation = langfuse.generation(input=chlen['text'], prompt=prompt,model=model.model)
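        # Track this extraction call in Langfuse; generation.end(...) below records the model output.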
messages = [
ChatMessage(role="system", content="You are an API that must always respond with a json without any formatting."),
ChatMessage(role="user", content=compiled_prompt),
]
chat_completion = model.chat(messages)
with open(f'../terms-output/{chlen["article_num"]}.json', 'w') as f:
f.write(json.dumps(json.loads(repair_json(chat_completion.message.content)), ensure_ascii=False))
generation.end(output=chat_completion)
print(f"chlen {chlen['article_num']} completed")
except Exception as e:
print(f"chlen {chlen['article_num']} failed with error: {e}") | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage"
] | [((261, 271), 'langfuse.Langfuse', 'Langfuse', ([], {}), '()\n', (269, 271), False, 'from langfuse import Langfuse\n'), ((433, 468), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (439, 468), False, 'from llama_index.llms.openai import OpenAI\n'), ((400, 422), 'json.loads', 'json.loads', (['input_text'], {}), '(input_text)\n', (410, 422), False, 'import json\n'), ((536, 598), 'os.path.exists', 'os.path.exists', (['f"""../terms-output/{chlen[\'article_num\']}.json"""'], {}), '(f"../terms-output/{chlen[\'article_num\']}.json")\n', (550, 598), False, 'import os\n'), ((853, 976), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': '"""You are an API that must always respond with a json without any formatting."""'}), "(role='system', content=\n 'You are an API that must always respond with a json without any formatting.'\n )\n", (864, 976), False, 'from llama_index.core.llms import ChatMessage\n'), ((976, 1025), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'compiled_prompt'}), "(role='user', content=compiled_prompt)\n", (987, 1025), False, 'from llama_index.core.llms import ChatMessage\n'), ((1187, 1231), 'json_repair.repair_json', 'repair_json', (['chat_completion.message.content'], {}), '(chat_completion.message.content)\n', (1198, 1231), False, 'from json_repair import repair_json\n')] |
import streamlit as st
import llama_index
from llama_index import StorageContext, load_index_from_storage
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index import KeywordTableIndex
from llama_index.indices.keyword_table import SimpleKeywordTableIndex
from llama_index import ResponseSynthesizer
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.retrievers import VectorIndexRetriever
from llama_index.retrievers import ListIndexRetriever
from llama_index.retrievers import TreeRootRetriever
from llama_index.indices.keyword_table.retrievers import KeywordTableGPTRetriever
from llama_index.indices.keyword_table import GPTSimpleKeywordTableIndex
from llama_index.indices.keyword_table.retrievers import KeywordTableRAKERetriever
from llama_index.indices.keyword_table.retrievers import KeywordTableSimpleRetriever
from llama_index import Prompt
from llama_index import LLMPredictor
from langchain.chat_models import ChatOpenAI
from llama_index import ServiceContext
def main():
st.title("Llama Index App")
# Select indexes
index_names = ["vector_store", "table", "tree", "list"]
index_choices = st.multiselect("Select indexes", index_names)
# Load indexes from storage contexts
indices = []
for index_name in index_choices:
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_name),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_name),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_name),
)
index = load_index_from_storage(storage_context)
indices.append(index)
# Prompt user for query
query = st.text_input("Enter your query")
# Query the indexes
response = None
for index in indices:
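        # Run the same question against each selected index with a shared QA prompt template.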
TEMPLATE_STR = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this information, please answer the question: {query_str}\n"
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1024)
query_engine = index.as_query_engine(
service_context=service_context,
text_qa_template=QA_TEMPLATE,
similarity_top_k=3,
streaming=True,
)
response = query_engine.query(query)
st.subheader(f"Results from {index.__class__.__name__}")
# Display the response
if response:
# formatted_sources = response.get_formatted_sources()
st.text(response)
print(response)
if __name__ == "__main__":
main()
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.ServiceContext.from_defaults",
"llama_index.Prompt",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage"
] | [((1225, 1252), 'streamlit.title', 'st.title', (['"""Llama Index App"""'], {}), "('Llama Index App')\n", (1233, 1252), True, 'import streamlit as st\n'), ((1355, 1400), 'streamlit.multiselect', 'st.multiselect', (['"""Select indexes"""', 'index_names'], {}), "('Select indexes', index_names)\n", (1369, 1400), True, 'import streamlit as st\n'), ((1942, 1975), 'streamlit.text_input', 'st.text_input', (['"""Enter your query"""'], {}), "('Enter your query')\n", (1955, 1975), True, 'import streamlit as st\n'), ((1830, 1870), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1853, 1870), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2331, 2351), 'llama_index.Prompt', 'Prompt', (['TEMPLATE_STR'], {}), '(TEMPLATE_STR)\n', (2337, 2351), False, 'from llama_index import Prompt\n'), ((2490, 2564), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size=1024)\n', (2518, 2564), False, 'from llama_index import ServiceContext\n'), ((2805, 2861), 'streamlit.subheader', 'st.subheader', (['f"""Results from {index.__class__.__name__}"""'], {}), "(f'Results from {index.__class__.__name__}')\n", (2817, 2861), True, 'import streamlit as st\n'), ((3003, 3020), 'streamlit.text', 'st.text', (['response'], {}), '(response)\n', (3010, 3020), True, 'import streamlit as st\n'), ((1574, 1634), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1610, 1634), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1661, 1719), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1695, 1719), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1745, 1802), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1778, 1802), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((2393, 2462), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', streaming=True)\n", (2403, 2462), False, 'from langchain.chat_models import ChatOpenAI\n')] |
from fastapi import FastAPI
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
import os.path
import llama_index
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
ServiceContext,
load_index_from_storage,
set_global_service_context,
PromptTemplate
)
from llama_index.embeddings import HuggingFaceEmbedding
from fastapi.middleware.cors import CORSMiddleware
import logging
import sys
# Set global handler for LLaMA index
llama_index.set_global_handler("simple")
# Initialize FastAPI app
app = FastAPI()
# Define directory for persisting index
PERSIST_DIR = "./storage"
# Initialize embedding model
embed_model = HuggingFaceEmbedding(model_name="OrdalieTech/Solon-embeddings-large-0.1")
# Create service context with embedding model
service_context = ServiceContext.from_defaults(embed_model=embed_model)
set_global_service_context(service_context)
# Load or create the index
if not os.path.exists(PERSIST_DIR):
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# Initialize query engine from index
query_engine = index.as_query_engine(streaming=True, similarity_top_k=2)
# Define custom prompt template
qa_prompt_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge,Some rules to follow: 1. Avoid statements like 'Based on the context, ...' or 'The context information ...' or anything along those lines. "
"answer the query in french and but remember you are chatbot trained on rh questions so always put that in perspective . you are named Rhym a chatbot created by the innovation team at BMCI \n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
# Update query engine with custom prompt template
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
# Define Pydantic model for query requests
class Query(BaseModel):
text: str
@app.get("/query")
async def query_index(query: str):
try:
response_stream = query_engine.query(query)
async def event_stream():
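            # Forward each streamed token as a server-sent event, then emit an end-of-stream marker.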
for text in response_stream.response_gen:
yield f"data: {text}\n\n"
# Send a special message or marker to indicate the end of the stream
yield "data: END_OF_STREAM\n\n"
return StreamingResponse(event_stream(), media_type="text/event-stream")
except Exception as e:
logging.error(f"Error during query processing: {str(e)}")
return JSONResponse(
status_code=503,
content={"message": "LLM API is currently unavailable.", "error": str(e)}
)
# Add CORS middleware to allow specific origins (or use '*' for all origins)
origins = [
"*", # Allow all origins
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.set_global_handler",
"llama_index.StorageContext.from_defaults",
"llama_index.set_global_service_context",
"llama_index.PromptTemplate",
"llama_index.load_index_from_storage",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((528, 568), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (558, 568), False, 'import llama_index\n'), ((601, 610), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (608, 610), False, 'from fastapi import FastAPI\n'), ((722, 795), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""OrdalieTech/Solon-embeddings-large-0.1"""'}), "(model_name='OrdalieTech/Solon-embeddings-large-0.1')\n", (742, 795), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((861, 914), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (889, 914), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((915, 958), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (941, 958), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((2076, 2110), 'llama_index.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (2090, 2110), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1093, 1135), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1124, 1135), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1223, 1276), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (1251, 1276), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1289, 1329), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1312, 1329), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((3437, 3480), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(app, host='0.0.0.0', port=8000)\n", (3448, 3480), False, 'import uvicorn\n'), ((1039, 1068), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1060, 1068), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n')] |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv")
df
from llama_index.packs.tables.chain_of_table.base import (
ChainOfTableQueryEngine,
serialize_table,
)
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"ChainOfTablePack",
"./chain_of_table_pack",
skip_load=True,
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
import pandas as pd
df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Who won best Director in the 1972 Academy Awards?")
str(response.response)
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("What was the precipitation in inches during June?")
str(response)
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
qp = QueryPipeline(chain=[prompt_c, llm])
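# Baseline for comparison: pass the fully serialized table straight to the LLM instead of using chain-of-table.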
response = qp.run("What was the precipitation in inches during June?")
print(str(response))
import pandas as pd
df = pd.read_csv("./WikiTableQuestions/csv/203-csv/114.csv")
df
query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True)
response = query_engine.query("Which televised ABC game had the greatest attendance?")
print(str(response))
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
prompt_str = """\
Here's a serialized table.
{serialized_table}
Given this table please answer the question: {question}
Answer: """
prompt = PromptTemplate(prompt_str)
prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)})
qp = QueryPipeline(chain=[prompt_c, llm])
response = qp.run("Which televised ABC game had the greatest attendance?")
print(str(response)) | [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.llms.openai.OpenAI",
"llama_index.core.PromptTemplate",
"llama_index.core.query_pipeline.QueryPipeline",
"llama_index.packs.tables.chain_of_table.base.serialize_table",
"llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine"
] | [((389, 442), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/200-csv/3.csv"""'], {}), "('./WikiTableQuestions/csv/200-csv/3.csv')\n", (400, 442), True, 'import pandas as pd\n'), ((622, 707), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""ChainOfTablePack"""', '"""./chain_of_table_pack"""'], {'skip_load': '(True)'}), "('ChainOfTablePack', './chain_of_table_pack', skip_load=True\n )\n", (641, 707), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((770, 804), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (776, 804), False, 'from llama_index.llms.openai import OpenAI\n'), ((854, 869), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (867, 869), True, 'import phoenix as px\n'), ((953, 1017), 'pandas.read_csv', 'pd.read_csv', (['"""~/Downloads/WikiTableQuestions/csv/200-csv/11.csv"""'], {}), "('~/Downloads/WikiTableQuestions/csv/200-csv/11.csv')\n", (964, 1017), True, 'import pandas as pd\n'), ((1040, 1090), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (1063, 1090), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((1230, 1284), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/200-csv/42.csv"""'], {}), "('./WikiTableQuestions/csv/200-csv/42.csv')\n", (1241, 1284), True, 'import pandas as pd\n'), ((1307, 1357), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (1330, 1357), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((1708, 1734), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (1722, 1734), False, 'from llama_index.core import PromptTemplate\n'), ((1831, 1867), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_c, llm]'}), '(chain=[prompt_c, llm])\n', (1844, 1867), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((1989, 2044), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/203-csv/114.csv"""'], {}), "('./WikiTableQuestions/csv/203-csv/114.csv')\n", (2000, 2044), True, 'import pandas as pd\n'), ((2065, 2115), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (2088, 2115), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((2475, 2501), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (2489, 2501), False, 'from llama_index.core import PromptTemplate\n'), ((2595, 2631), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_c, llm]'}), '(chain=[prompt_c, llm])\n', (2608, 2631), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((1801, 1820), 'llama_index.packs.tables.chain_of_table.base.serialize_table', 'serialize_table', (['df'], {}), '(df)\n', (1816, 1820), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((2568, 2587), 
'llama_index.packs.tables.chain_of_table.base.serialize_table', 'serialize_table', (['df'], {}), '(df)\n', (2583, 2587), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n')] |
from llama_hub.file.unstructured.base import UnstructuredReader
from unstructured.partition.auto import partition
from unstructured.documents.elements import NarrativeText
import llama_index as li
from pathlib import Path
import openai
import os
import re
import ast
import json
openai.api_key = 'your_key'
os.environ['OPENAI_API_KEY'] = 'your_key'
#Path to textbook pdf
pdf = Path(f'data/ex_textbook.pdf')
UnstructuredReader = li.download_loader("UnstructuredReader", refresh_cache=True, use_gpt_index_import=True)
loader = UnstructuredReader()
textbook = loader.load_data(file=pdf, split_documents=True)
elements = partition(filename=pdf)
#What you need to modify
#Start of every chapter
pattern1 = r"(\d+)\s+Chapter\s+(\d+):"
#End of every chapter introduction
pattern2 = r"^\d+[CE](?: [A-Z])+"
#End of last chapter
end = "Need to Know More?"
#Tracking where chapters start/end
chapter_found = {}
chapter_starts = []
intros = []
for iteration, element in enumerate(textbook):
match1 = re.search(pattern1, element.text)
match2 = re.search(pattern2, element.text)
if match1:
chapter_number = match1.group(2)
if chapter_number not in chapter_found:
chapter_found[chapter_number] = True
chapter_starts.append(iteration)
intros.append(iteration)
if match2:
intros.append(iteration)
#Finding where last chapter ends
for x in textbook[chapter_starts[len(chapter_starts) - 1]:]:
if x.text == end:
chapter_starts.append(textbook.index(x))
#Collecting chapter summaries for GPT prompts
summaries = []
iteration = 0
for x in intros[::2]:
temp =''
for element in elements[x:intros[iteration+1]]:
temp = temp + textbook[elements.index(element)].text + '\n'
summaries.append(temp)
iteration += 2
#Making dictionary with chapter as key and document objects as elements
directory = {}
chapter_num = 1
for x in range(len(chapter_starts) - 1):
text = []
for element in elements[chapter_starts[x]:chapter_starts[x+1]]:
if isinstance(element, NarrativeText):
text.append(textbook[elements.index(element)])
directory['Chapter ' + str(chapter_num)] = text
chapter_num += 1
#Combining all the narrative text of each chapter into one string and adding "This is Chapter 'x': " to the beginning and "This is the end of Chapter 'x'" to the end
final=[]
for chapter in directory:
txt = ''
for text in directory[chapter]:
txt = txt + text.text
directory[chapter][0].text = txt
final.append(directory[chapter][0])
for iteration, text in enumerate(final):
final[iteration].text = "This is Chapter " + str(iteration + 1) + ":\n" + text.text + "\nThis is the end of Chapter " + str(iteration + 1)
node_parser = li.node_parser.SimpleNodeParser()
nodes = node_parser.get_nodes_from_documents(final)
test_index = li.GPTVectorStoreIndex(nodes=nodes, chunk_size_limit=512)
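# Build a vector index over the chapter nodes; it is queried below to draft multiple-choice questions per chapter.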
query_engine = test_index.as_query_engine()
def create_questions(num_chapters):
form ="""[
{
"question": ,
"choices": ,
"correct_answer_index": ,
"explanation":
}
]
"""
final = []
for chapter in range(num_chapters):
temp = []
chap_num = str(chapter + 1)
summary = query_engine.query(f"""Elaborate on these key topics of chapter {chap_num} in detail:
{summaries[chapter]}
""" )
temp.append(str(summary))
response = query_engine.query(f"""
CHAPTER {chap_num}:
{str(summary)}
Please generate SIX different multiple choice questions that covers all of the above information. Must be variety in the type of questions (scenario questions, definitions, comparison questions) and some must have multiple correct answers. Do NOT reference the text in the questions and explanations themselves. Do not repeat any questions. In the explanation, provide more insight and also the chapter that it comes from
Return the result in the following JSON format:
{form}
""")
temp.append(ast.literal_eval(str(response)))
final.append(temp)
return final
questions = create_questions(len(chapter_starts) - 1)
test = []
for chap in questions:
for question in chap[1]:
test.append(question)
json_string = json.dumps(test, indent=4)
# Write the JSON string to a file
with open("website/ex_questions.json", "w") as json_file:
json_file.write(json_string)
| [
"llama_index.GPTVectorStoreIndex",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.download_loader"
] | [((379, 408), 'pathlib.Path', 'Path', (['f"""data/ex_textbook.pdf"""'], {}), "(f'data/ex_textbook.pdf')\n", (383, 408), False, 'from pathlib import Path\n'), ((431, 522), 'llama_index.download_loader', 'li.download_loader', (['"""UnstructuredReader"""'], {'refresh_cache': '(True)', 'use_gpt_index_import': '(True)'}), "('UnstructuredReader', refresh_cache=True,\n use_gpt_index_import=True)\n", (449, 522), True, 'import llama_index as li\n'), ((528, 548), 'llama_hub.file.unstructured.base.UnstructuredReader', 'UnstructuredReader', ([], {}), '()\n', (546, 548), False, 'from llama_hub.file.unstructured.base import UnstructuredReader\n'), ((620, 643), 'unstructured.partition.auto.partition', 'partition', ([], {'filename': 'pdf'}), '(filename=pdf)\n', (629, 643), False, 'from unstructured.partition.auto import partition\n'), ((2783, 2816), 'llama_index.node_parser.SimpleNodeParser', 'li.node_parser.SimpleNodeParser', ([], {}), '()\n', (2814, 2816), True, 'import llama_index as li\n'), ((2882, 2939), 'llama_index.GPTVectorStoreIndex', 'li.GPTVectorStoreIndex', ([], {'nodes': 'nodes', 'chunk_size_limit': '(512)'}), '(nodes=nodes, chunk_size_limit=512)\n', (2904, 2939), True, 'import llama_index as li\n'), ((4304, 4330), 'json.dumps', 'json.dumps', (['test'], {'indent': '(4)'}), '(test, indent=4)\n', (4314, 4330), False, 'import json\n'), ((999, 1032), 're.search', 're.search', (['pattern1', 'element.text'], {}), '(pattern1, element.text)\n', (1008, 1032), False, 'import re\n'), ((1046, 1079), 're.search', 're.search', (['pattern2', 'element.text'], {}), '(pattern2, element.text)\n', (1055, 1079), False, 'import re\n')] |
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import nltk
nltk.download("stopwords")
import llama_index.core
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
query_engine = index.as_query_engine(response_mode="tree_summarize")
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
query_modes = [
"svm",
"linear_regression",
"logistic_regression",
]
for query_mode in query_modes:
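    # Compare alternative vector_store_query_mode settings (SVM, linear regression, logistic regression) on the same question.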
query_engine = index.as_query_engine(vector_store_query_mode=query_mode)
response = query_engine.query("What did the author do growing up?")
print(f"Query mode: {query_mode}")
display(Markdown(f"<b>{response}</b>"))
display(Markdown(f"<b>{response}</b>"))
print(response.source_nodes[0].text)
from llama_index.core import QueryBundle
query_bundle = QueryBundle(
query_str="What did the author do growing up?",
custom_embedding_strs=["The author grew up painting."],
)
query_engine = index.as_query_engine()
response = query_engine.query(query_bundle)
display(Markdown(f"<b>{response}</b>"))
query_engine = index.as_query_engine(
vector_store_query_mode="mmr", vector_store_kwargs={"mmr_threshold": 0.2}
)
response = query_engine.query("What did the author do growing up?")
print(response.get_formatted_sources())
from llama_index.core import Document
doc = Document(text="target", metadata={"tag": "target"})
index.insert(doc)
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(
filters=[ExactMatchFilter(key="tag", value="target")]
)
retriever = index.as_retriever(
similarity_top_k=20,
filters=filters,
)
source_nodes = retriever.retrieve("What did the author do growing up?")
print(len(source_nodes))
print(source_nodes[0].text)
print(source_nodes[0].metadata) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.QueryBundle",
"llama_index.core.Document",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.vector_stores.ExactMatchFilter"
] | [((177, 203), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (190, 203), False, 'import nltk\n'), ((259, 317), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (278, 317), False, 'import logging\n'), ((895, 937), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (926, 937), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((1038, 1089), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""storage"""'}), "(persist_dir='storage')\n", (1066, 1089), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((1098, 1163), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': '"""vector_index"""'}), "(storage_context, index_id='vector_index')\n", (1121, 1163), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((1835, 1954), 'llama_index.core.QueryBundle', 'QueryBundle', ([], {'query_str': '"""What did the author do growing up?"""', 'custom_embedding_strs': "['The author grew up painting.']"}), "(query_str='What did the author do growing up?',\n custom_embedding_strs=['The author grew up painting.'])\n", (1846, 1954), False, 'from llama_index.core import QueryBundle\n'), ((2367, 2418), 'llama_index.core.Document', 'Document', ([], {'text': '"""target"""', 'metadata': "{'tag': 'target'}"}), "(text='target', metadata={'tag': 'target'})\n", (2375, 2418), False, 'from llama_index.core import Document\n'), ((349, 389), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (370, 389), False, 'import logging\n'), ((1314, 1344), 'IPython.display.Markdown', 'Markdown', (['f"""<b>{response}</b>"""'], {}), "(f'<b>{response}</b>')\n", (1322, 1344), False, 'from IPython.display import Markdown, display\n'), ((1703, 1733), 'IPython.display.Markdown', 'Markdown', (['f"""<b>{response}</b>"""'], {}), "(f'<b>{response}</b>')\n", (1711, 1733), False, 'from IPython.display import Markdown, display\n'), ((2055, 2085), 'IPython.display.Markdown', 'Markdown', (['f"""<b>{response}</b>"""'], {}), "(f'<b>{response}</b>')\n", (2063, 2085), False, 'from IPython.display import Markdown, display\n'), ((318, 337), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (335, 337), False, 'import logging\n'), ((828, 872), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), "('./data/paul_graham/')\n", (849, 872), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((1661, 1691), 'IPython.display.Markdown', 'Markdown', (['f"""<b>{response}</b>"""'], {}), "(f'<b>{response}</b>')\n", (1669, 1691), False, 'from IPython.display import Markdown, display\n'), ((2558, 2601), 'llama_index.core.vector_stores.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""tag"""', 'value': '"""target"""'}), "(key='tag', value='target')\n", (2574, 2601), False, 'from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n')] |
import gradio as gr
import os
from datetime import datetime
import logging
import sys
from llama_index import SimpleDirectoryReader
import llama_index.readers.file.base
import glob
import numpy as np
import soundfile as sf
import shutil
import openai
import json
import cv2
from llama_index import download_loader
ImageCaptionReader = download_loader('ImageCaptionReader')
openai.api_key = os.environ['OPENAI_API_KEY']
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# print('done processing import')
with open('config.json', encoding='utf8') as f:
config = json.load(f)
def process_inputs(text: str, image: np.ndarray, video: str, audio: tuple, ):
output = ""
# # print('video', type(video), video)
# # print('text', type(text), text)
# # print('audio', type(audio), audio)
# # print('image', type(image), image)
    if not text and image is None and not video and audio is None:
return "Please upload at least one of the following: text, image, video, audio."
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# Create a folder named 'media_files' if it doesn't exist
os.makedirs(f"media_files/{timestamp}", exist_ok=True)
if video:
video_path = os.path.join("media_files", f"{timestamp}/video.mp4")
# copy from "video" to "video_path"
shutil.copyfile(video, video_path)
# os.rename(video_path, video_path)
ffmpeg_cmd = f'ffmpeg -i {video_path} -vf "select=not(mod(n\,100))" -vsync vfr media_files/{timestamp}/frame_%03d.jpg'
os.system(ffmpeg_cmd)
output += "Video processed and saved.\n"
print("Video processed and saved.")
# gr.Interface.update("Video saved.")
if text:
text_path = os.path.join("media_files", f"{timestamp}/text.txt")
with open(text_path, "w", encoding='utf8') as f:
f.write(text)
output += "Text processed and saved: " + text + "\n"
# print("Text processed and saved: " + text + "")
# gr.Interface.update("Text processed and saved: " + "")
if audio is not None:
sr, audio = audio
audio_path = os.path.join("media_files", f"{timestamp}/audio.mp3")
sf.write(audio_path, audio, sr)
output += "Audio processed and saved.\n"
print("Audio processed and saved.")
# gr.Interface.update("Audio saved.")
if image is not None:
image_path = os.path.join("media_files", f"{timestamp}/image.png")
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(image_path, image)
output += "Image processed and saved.\n"
print("Image processed and saved.")
# gr.Interface.update("Image saved.")
root = f"media_files/{timestamp}"
image_caption_prompt = "Question: Describe what you see in this image and if there are any dangers or emergencies there any dangers and how sever they are. Answer:"
text_files = glob.glob(f'{root}/*.txt')
text_content = ''
if text_files:
# print('processing text_files ...')
text_content = SimpleDirectoryReader(
input_files=text_files,
file_extractor={
".jpg": ImageCaptionReader(),
".png": ImageCaptionReader(),
".jpeg": ImageCaptionReader(),
".wav": llama_index.readers.file.video_audio_reader,
".mp4": llama_index.readers.file.video_audio_reader,
}
).load_data()
texts = [x.text for x in text_content]
text = '\n\n'.join(texts)
text_content = text.replace('"', "'").replace('\n', '. ')
# print('done processing text_files')
image_files = glob.glob(f'{root}/*.png') + glob.glob(f'{root}/*.jpg')
image_content = ''
if image_files:
# print('processing image_files ...')
image_content = SimpleDirectoryReader(
input_files=image_files,
file_extractor={
".jpg": ImageCaptionReader(),
".png": ImageCaptionReader(),
".jpeg": ImageCaptionReader(),
".wav": llama_index.readers.file.video_audio_reader,
".mp4": llama_index.readers.file.video_audio_reader,
}
).load_data()
texts = [x.text for x in image_content]
text = '\n\n'.join(texts)
image_content = text.replace('"', "'").replace('\n', '. ')
# print('done processing image_files')
audio_files = glob.glob(f'{root}/*.mp3')
audio_content = ''
if audio_files:
# print('processing audio_files ...')
audio_content = SimpleDirectoryReader(
input_files=audio_files,
file_extractor={
".jpg": ImageCaptionReader(),
".png": ImageCaptionReader(),
".jpeg": ImageCaptionReader(),
".mp3": llama_index.readers.file.video_audio_reader,
".mp4": llama_index.readers.file.video_audio_reader,
}
).load_data()
texts = [x.text for x in audio_content]
text = '\n\n'.join(texts)
audio_content = text.replace('"', "'").replace('\n', '. ')
# print('done processing audio_files')
video_files = glob.glob(f'{root}/*.mp4')
video_content = ''
if video_files:
# print('processing video_files ...')
video_content = SimpleDirectoryReader(
input_files=video_files,
file_extractor={
".jpg": ImageCaptionReader(),
".png": ImageCaptionReader(),
".jpeg": ImageCaptionReader(),
".mp3": llama_index.readers.file.video_audio_reader,
".mp4": llama_index.readers.file.video_audio_reader,
}
).load_data()
texts = [x.text for x in video_content]
text = '\n\n'.join(texts)
video_content = text.replace('"', "'").replace('\n', '. ')
# print('done processing video_files')
ar2en = {v:k for (k,v) in config["en2ar"].items()}
emergencies_en = [ar2en[k] for k in config['redirects']]
system_prompt = f"""I want you to act as a 911 operator that understands Arabic.
I will give you text and audio transcripts that the users upload in an emergency, and I need you to classify the different types of emergencies.
The incoming information could be Arabic or English, and you must output only in English.
The different types of emergencies are only one of {len(emergencies_en)}: {json.dumps(emergencies_en)}
    I will give you the information provided by the user below, and you should classify it into one of the {len(emergencies_en)} types of emergencies.
"""
prompt = """
=== User information for emergency
"""
if text_content:
prompt += f'User text: "{text_content}"\n'
if image_content:
prompt += f'User uploaded an image of: "{image_content}"\n'
if audio_content:
prompt += f'User uploaded an audio, the text in that audio sounds like: "{audio_content} {video_content}" \n'
prompt += """
=== End of user information for emergency
Now you must output only in JSON in the following format: {"emergency_class": string, "explaination_arabic": string}
Note that "explaination_arabic" must be in Arabic.
For the emergency_class, you must choose one of the following: """ + json.dumps(emergencies_en)
# print('prompt', prompt)
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt},
]
)
# parse model JSON output
content = completion.choices[0].message.content
content = content.replace(",}", "}") # just in case
# start from first "{" until the first "}"
content = content[content.find("{") : content.find("}")+1]
# print('ChatGPT response:', content)
try:
result = json.loads(content)
except:
result = {
"emergency_class": "unknown",
"explaination_arabic": "Could not parse output.: " + content
}
emergency_class_ar = config['en2ar'].get(result['emergency_class'], "غير معروف")
redirects = config['redirects'].get(emergency_class_ar, ["<غير معروف>"])
output = f"""نوع الحالة: {emergency_class_ar}
الجهات المسؤولة:
- """ + ('\n - '.join(redirects)) + f"\n\nالشرح: {result['explaination_arabic']}"
return output if output else "No input provided."
video_input = gr.inputs.Video(optional=True, label="Input Video")
text_input = gr.inputs.Textbox(lines=3, optional=True, label="Input Text")
audio_input = gr.inputs.Audio(optional=True, label="Input Audio")
image_input = gr.inputs.Image(optional=True, label="Input Image")
output_text = gr.outputs.Textbox(label="Output Text")
examples = [
# text_input, image_input, video_input, audio_input
["", None,"data/fire_at_gas_station.mp4", None,],
["", "data/small-car-accident.jpg", None, None],
["", "data/electrical-fire.jpg", None, None],
["", "data/major-car-accident.jpg", None, None],
["", "data/gettyimages-50908538-612x612.jpg", None, None],
["", None, None, "data/fire_at_gas_station.mp3",],
["السلام عليكم، أنا أتصل لأبلغ عن حريق كبير في مبنى سكني بشارع المنصور. يبدو أن النيران اندلعت في الطابق الثالث وتنتشر بسرورة. يرجى إرسال رجال الإطفاء فوراً", None, None, None],
["السلام عليكم، أنا أتصل لأبلغ عن حادثة تحرش حدثت لي في مترو الأنفاق بمحطة المرج. كان هناك رجل يلمسني بشكل غير لائق ويحاول مضايقتي. يرجى إرسال دورية أمنية للموقع فوراً", None, None, None],
["السلام عليكم، أنا أتصل لأبلغ عن سرقة تعرضت لها قبل قليل. شخصان قاما بسلب هاتفي الجوال ومحفظتي تحت تهديد السلاح. حدث ذلك في حي النزهة بالقرب من متجر السوبر ماركت. أرجو إرسال دورية أمنية وفتح تحقيق في الواقعة", None, None, None],
]
iface = gr.Interface(
fn=process_inputs,
inputs=[text_input, image_input, video_input, audio_input],
outputs=output_text,
title="<img src='https://i.imgur.com/Qakrqvn.png' width='100' height='100'> منصة استجابة",
description="تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق 'كلنا امن' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها",
examples=examples,
cache_examples=True,
)
# image = gr.Image("logo.png", style=(100, 100))
# iface.add(image)
# "text-align: right;"
# print('http://127.0.0.1:7860/?__theme=light')
iface.launch(
share=True,
favicon_path='logo.png'
)
| [
"llama_index.download_loader"
] | [((337, 374), 'llama_index.download_loader', 'download_loader', (['"""ImageCaptionReader"""'], {}), "('ImageCaptionReader')\n", (352, 374), False, 'from llama_index import download_loader\n'), ((423, 481), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (442, 481), False, 'import logging\n'), ((8563, 8614), 'gradio.inputs.Video', 'gr.inputs.Video', ([], {'optional': '(True)', 'label': '"""Input Video"""'}), "(optional=True, label='Input Video')\n", (8578, 8614), True, 'import gradio as gr\n'), ((8628, 8689), 'gradio.inputs.Textbox', 'gr.inputs.Textbox', ([], {'lines': '(3)', 'optional': '(True)', 'label': '"""Input Text"""'}), "(lines=3, optional=True, label='Input Text')\n", (8645, 8689), True, 'import gradio as gr\n'), ((8704, 8755), 'gradio.inputs.Audio', 'gr.inputs.Audio', ([], {'optional': '(True)', 'label': '"""Input Audio"""'}), "(optional=True, label='Input Audio')\n", (8719, 8755), True, 'import gradio as gr\n'), ((8770, 8821), 'gradio.inputs.Image', 'gr.inputs.Image', ([], {'optional': '(True)', 'label': '"""Input Image"""'}), "(optional=True, label='Input Image')\n", (8785, 8821), True, 'import gradio as gr\n'), ((8837, 8876), 'gradio.outputs.Textbox', 'gr.outputs.Textbox', ([], {'label': '"""Output Text"""'}), "(label='Output Text')\n", (8855, 8876), True, 'import gradio as gr\n'), ((9894, 10355), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'process_inputs', 'inputs': '[text_input, image_input, video_input, audio_input]', 'outputs': 'output_text', 'title': '"""<img src=\'https://i.imgur.com/Qakrqvn.png\' width=\'100\' height=\'100\'> منصة استجابة"""', 'description': '"""تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق \'كلنا امن\' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها"""', 'examples': 'examples', 'cache_examples': '(True)'}), '(fn=process_inputs, inputs=[text_input, image_input,\n video_input, audio_input], outputs=output_text, title=\n "<img src=\'https://i.imgur.com/Qakrqvn.png\' width=\'100\' height=\'100\'> منصة استجابة"\n , description=\n """تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق \'كلنا امن\' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها"""\n , examples=examples, cache_examples=True)\n', (9906, 10355), True, 'import gradio as gr\n'), ((513, 553), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (534, 553), False, 'import logging\n'), ((651, 663), 'json.load', 'json.load', (['f'], {}), '(f)\n', (660, 663), False, 'import json\n'), ((1216, 1270), 'os.makedirs', 'os.makedirs', (['f"""media_files/{timestamp}"""'], {'exist_ok': '(True)'}), "(f'media_files/{timestamp}', exist_ok=True)\n", (1227, 1270), False, 'import os\n'), ((3028, 3054), 'glob.glob', 'glob.glob', (['f"""{root}/*.txt"""'], {}), "(f'{root}/*.txt')\n", (3037, 3054), False, 'import glob\n'), ((4565, 4591), 'glob.glob', 'glob.glob', (['f"""{root}/*.mp3"""'], {}), "(f'{root}/*.mp3')\n", (4574, 4591), False, 'import glob\n'), ((5323, 5349), 'glob.glob', 'glob.glob', (['f"""{root}/*.mp4"""'], {}), "(f'{root}/*.mp4')\n", (5332, 5349), False, 'import glob\n'), ((7486, 7627), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-4"""', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n prompt}]"}), "(model='gpt-4', 
messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': prompt}])\n", (7514, 7627), False, 'import openai\n'), ((482, 501), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (499, 501), False, 'import logging\n'), ((1308, 1361), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/video.mp4"""'], {}), "('media_files', f'{timestamp}/video.mp4')\n", (1320, 1361), False, 'import os\n'), ((1415, 1449), 'shutil.copyfile', 'shutil.copyfile', (['video', 'video_path'], {}), '(video, video_path)\n', (1430, 1449), False, 'import shutil\n'), ((1638, 1659), 'os.system', 'os.system', (['ffmpeg_cmd'], {}), '(ffmpeg_cmd)\n', (1647, 1659), False, 'import os\n'), ((1834, 1886), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/text.txt"""'], {}), "('media_files', f'{timestamp}/text.txt')\n", (1846, 1886), False, 'import os\n'), ((2228, 2281), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/audio.mp3"""'], {}), "('media_files', f'{timestamp}/audio.mp3')\n", (2240, 2281), False, 'import os\n'), ((2290, 2321), 'soundfile.write', 'sf.write', (['audio_path', 'audio', 'sr'], {}), '(audio_path, audio, sr)\n', (2298, 2321), True, 'import soundfile as sf\n'), ((2509, 2562), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/image.png"""'], {}), "('media_files', f'{timestamp}/image.png')\n", (2521, 2562), False, 'import os\n'), ((2579, 2617), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (2591, 2617), False, 'import cv2\n'), ((2626, 2656), 'cv2.imwrite', 'cv2.imwrite', (['image_path', 'image'], {}), '(image_path, image)\n', (2637, 2656), False, 'import cv2\n'), ((3778, 3804), 'glob.glob', 'glob.glob', (['f"""{root}/*.png"""'], {}), "(f'{root}/*.png')\n", (3787, 3804), False, 'import glob\n'), ((3807, 3833), 'glob.glob', 'glob.glob', (['f"""{root}/*.jpg"""'], {}), "(f'{root}/*.jpg')\n", (3816, 3833), False, 'import glob\n'), ((7410, 7436), 'json.dumps', 'json.dumps', (['emergencies_en'], {}), '(emergencies_en)\n', (7420, 7436), False, 'import json\n'), ((7998, 8017), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (8008, 8017), False, 'import json\n'), ((1109, 1123), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1121, 1123), False, 'from datetime import datetime\n'), ((6598, 6624), 'json.dumps', 'json.dumps', (['emergencies_en'], {}), '(emergencies_en)\n', (6608, 6624), False, 'import json\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
            # rewrite imports to the legacy gpt_index package name
            basepy_raw_content = basepy_raw_content.replace(
                "import llama_index", "import gpt_index"
            )
            basepy_raw_content = basepy_raw_content.replace(
                "from llama_index", "from gpt_index"
            )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
skip_load: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, the loader files will use
            gpt_index as the base dependency. By default (False),
            the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
if skip_load:
return None
# loads the module into memory
if override_path:
path = f"{dirpath}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
else:
path = f"{dirpath}/{module_id}/{base_file_name}"
spec = util.spec_from_file_location("custom_module", location=path)
if spec is None:
raise ValueError(f"Could not find file: {path}.")
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
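# Illustrative usage sketch (not part of the original module). The class name and
# keyword arguments below are examples only; any class listed in LlamaHub's
# library.json can be downloaded the same way.
#
#   pack_cls = download_llama_module("GmailOpenAIAgentPack", refresh_cache=True)
#   pack = pack_cls()  # constructor arguments depend on the downloaded class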
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7432, 7500), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7452, 7500), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8830, 8857), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8851, 8857), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8438, 8498), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8466, 8498), False, 'from importlib import util\n'), ((8668, 8728), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8696, 8728), False, 'from importlib import util\n'), ((9280, 9382), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9293, 9382), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 
'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |
import os
import openai
import logging
import sys
import llama_index
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
LLMPredictor,
PromptHelper,
ServiceContext,
)
from llama_index.llms import OpenAI
import chromadb
from llama_index.vector_stores import ChromaVectorStore
from llama_index.embeddings import OpenAIEmbedding
from trulens_eval import Tru
from llama_index.query_engine import CitationQueryEngine
import json
openai.api_key = os.environ["OPENAI_API_KEY"]
CUSTOM_QUERY = "First greet yourself and Send me a summary of the file. In your summary, make sure to mention the file location and the data name, also to have 10 bullet points. Each bullet point should be on a new row. Try to incorporate few key points from all the text. Do it step by step:"
list_of_indices = []
tru = Tru()
tru.reset_database()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def create_index(directory, unique_folder_id):
llm = OpenAI(temperature=0.1, model="gpt-4-vision-preview", max_tokens=512)
prompt_helper = PromptHelper(
context_window=4096,
num_output=256,
chunk_overlap_ratio=0.1,
chunk_size_limit=None,
)
service_context = ServiceContext.from_defaults(llm=llm, prompt_helper=prompt_helper)
documents = SimpleDirectoryReader(input_dir=directory).load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
index.set_index_id(create_dynamic_vector_ids(unique_folder_id))
index.storage_context.persist(create_dynamic_storage_contexts(unique_folder_id))
a = index.index_struct_cls
# Chroma vector store for easy indexing and retrieval
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("investment_ai")
chroma_vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
chroma_storage_context = StorageContext.from_defaults(
vector_store=chroma_vector_store
)
chroma_index = VectorStoreIndex.from_documents(
documents,
storage_context=chroma_storage_context,
service_context=service_context,
)
print(chroma_index.storage_context.graph_store.get)
return index
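# Illustrative call order (the upload path and folder id are assumptions for
# demonstration only): build the index once per upload, then summarize or query it.
#
#   create_index("uploads/demo1", "demo1")    # persists under ./storage_demo1
#   summary = auto_summarization("demo1")     # runs CUSTOM_QUERY over the index
#   stream = ask_question("What are the key figures?", "demo1")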
def auto_summarization(unique_folder_id):
dynamic_storage_context = create_dynamic_storage_contexts(unique_folder_id)
dynamic_vector_id = create_dynamic_vector_ids(unique_folder_id)
storage_context = StorageContext.from_defaults(persist_dir=dynamic_storage_context)
# load index
index = load_index_from_storage(storage_context, index_id=dynamic_vector_id)
query_engine = index.as_query_engine(response_mode="compact", verbose=True)
response = query_engine.query(CUSTOM_QUERY)
return str(response.response)
def ask_question(query, unique_folder_id):
dynamic_storage_context = create_dynamic_storage_contexts(unique_folder_id)
dynamic_vector_id = create_dynamic_vector_ids(unique_folder_id)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=dynamic_storage_context)
# load index
index = llama_index.indices.loading.load_index_from_storage(
storage_context, index_id=dynamic_vector_id
)
query_engine = CitationQueryEngine.from_args(
index, similarity_top_k=3, citation_chunk_size=512, streaming=True
)
response_stream = query_engine.query(
"When a question is asked always and if it is a greeting please answer accordingly.If question is not about given data, say you only answer about given data. If the question is about the given data please eloborate more on details and answer human-like according to this question: "
+ query
)
return response_stream
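# Hedged sketch of how a caller might consume the stream (question and folder id
# are assumptions): the engine above is built with streaming=True, so the result
# exposes a token generator.
#
#   stream = ask_question("Summarize the key risks.", "demo1")
#   for token in stream.response_gen:  # print tokens as they arrive
#       print(token, end="", flush=True)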
def create_dynamic_storage_contexts(unique_folder_id):
return "./storage_" + str(unique_folder_id)
def create_dynamic_vector_ids(unique_folder_id):
return "vector_index_" + str(unique_folder_id)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.query_engine.CitationQueryEngine.from_args",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.PromptHelper",
"llama_index.load_index_from_storage"
] | [((879, 884), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (882, 884), False, 'from trulens_eval import Tru\n'), ((907, 965), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (926, 965), False, 'import logging\n'), ((997, 1037), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1018, 1037), False, 'import logging\n'), ((1098, 1167), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-4-vision-preview"""', 'max_tokens': '(512)'}), "(temperature=0.1, model='gpt-4-vision-preview', max_tokens=512)\n", (1104, 1167), False, 'from llama_index.llms import OpenAI\n'), ((1188, 1289), 'llama_index.PromptHelper', 'PromptHelper', ([], {'context_window': '(4096)', 'num_output': '(256)', 'chunk_overlap_ratio': '(0.1)', 'chunk_size_limit': 'None'}), '(context_window=4096, num_output=256, chunk_overlap_ratio=0.1,\n chunk_size_limit=None)\n', (1200, 1289), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1348, 1414), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'prompt_helper': 'prompt_helper'}), '(llm=llm, prompt_helper=prompt_helper)\n', (1376, 1414), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1500, 1575), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1531, 1575), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1828, 1873), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (1853, 1873), False, 'import chromadb\n'), ((1970, 2024), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (1987, 2024), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((2055, 2117), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'chroma_vector_store'}), '(vector_store=chroma_vector_store)\n', (2083, 2117), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((2152, 2272), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'chroma_storage_context', 'service_context': 'service_context'}), '(documents, storage_context=\n chroma_storage_context, service_context=service_context)\n', (2183, 2272), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((2586, 2651), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'dynamic_storage_context'}), '(persist_dir=dynamic_storage_context)\n', (2614, 2651), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, 
ServiceContext\n'), ((2681, 2749), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': 'dynamic_vector_id'}), '(storage_context, index_id=dynamic_vector_id)\n', (2704, 2749), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((3192, 3257), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'dynamic_storage_context'}), '(persist_dir=dynamic_storage_context)\n', (3220, 3257), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((3287, 3387), 'llama_index.indices.loading.load_index_from_storage', 'llama_index.indices.loading.load_index_from_storage', (['storage_context'], {'index_id': 'dynamic_vector_id'}), '(storage_context,\n index_id=dynamic_vector_id)\n', (3338, 3387), False, 'import llama_index\n'), ((3417, 3518), 'llama_index.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(3)', 'citation_chunk_size': '(512)', 'streaming': '(True)'}), '(index, similarity_top_k=3,\n citation_chunk_size=512, streaming=True)\n', (3446, 3518), False, 'from llama_index.query_engine import CitationQueryEngine\n'), ((966, 985), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (983, 985), False, 'import logging\n'), ((1432, 1474), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'directory'}), '(input_dir=directory)\n', (1453, 1474), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n')] |
import streamlit as st
import llama_index
from llama_index import StorageContext, load_index_from_storage
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index import KeywordTableIndex
from llama_index.indices.keyword_table import SimpleKeywordTableIndex
from llama_index import ResponseSynthesizer
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.retrievers import VectorIndexRetriever
from llama_index.retrievers import ListIndexRetriever
from llama_index.retrievers import TreeRootRetriever
from llama_index.indices.keyword_table.retrievers import KeywordTableGPTRetriever
from llama_index.indices.keyword_table import GPTSimpleKeywordTableIndex
from llama_index.indices.keyword_table.retrievers import KeywordTableRAKERetriever
from llama_index.indices.keyword_table.retrievers import KeywordTableSimpleRetriever
from llama_index import Prompt
from llama_index import LLMPredictor
from langchain.chat_models import ChatOpenAI
from llama_index import ServiceContext
print("1")
storage_context_1 = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir="vector_store"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir="vector_store"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir="vector_store"),
)
storage_context_2 = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir="table"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir="table"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir="table"),
)
storage_context_3 = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir="tree"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir="tree"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir="tree"),
)
storage_context_4 = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir="list"),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir="list"),
index_store=SimpleIndexStore.from_persist_dir(persist_dir="list"),
)
print("2")
from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage
indices1 = load_index_from_storage(storage_context_1)
indices2 = load_index_from_storage(storage_context_2)
indices3 = load_index_from_storage(storage_context_3)
indices4 = load_index_from_storage(storage_context_4)
# indices1 = load_index_from_storage(storage_context="vector_store")
index = [indices1, indices2, indices3, indices4]
print("3")
print("4")
from llama_index.indices.response import BaseResponseBuilder
# configure response synthesizer
response_synthesizer = ResponseSynthesizer.from_args(
# node_postprocessors=[
# ]
)
print("5")
TEMPLATE_STR = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this information, please answer the question: {query_str}\n"
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1024)
query_engine1 = indices3.as_query_engine(service_context=service_context, text_qa_template=QA_TEMPLATE, similarity_top_k=3, streaming=True, )
response = query_engine1.query('How much package has government of india announced?')
# print("7")
str(response)
print(response)
# response.source_nodes
print(response.source_nodes)
########## working ########## | [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.ServiceContext.from_defaults",
"llama_index.Prompt",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.ResponseSynthesizer.from_args",
"llama_index.load_index_from_storage"
] | [((2439, 2481), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_1'], {}), '(storage_context_1)\n', (2462, 2481), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2493, 2535), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_2'], {}), '(storage_context_2)\n', (2516, 2535), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2547, 2589), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_3'], {}), '(storage_context_3)\n', (2570, 2589), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2601, 2643), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_4'], {}), '(storage_context_4)\n', (2624, 2643), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2909, 2940), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {}), '()\n', (2938, 2940), False, 'from llama_index import ResponseSynthesizer\n'), ((3238, 3258), 'llama_index.Prompt', 'Prompt', (['TEMPLATE_STR'], {}), '(TEMPLATE_STR)\n', (3244, 3258), False, 'from llama_index import Prompt\n'), ((3383, 3457), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size=1024)\n', (3411, 3457), False, 'from llama_index import ServiceContext\n'), ((1282, 1346), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1318, 1346), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1365, 1427), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1399, 1427), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1445, 1506), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1478, 1506), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((1574, 1631), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1610, 1631), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1650, 1705), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1684, 1705), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1723, 1777), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1756, 1777), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((1844, 1900), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (1880, 1900), False, 'from 
llama_index.storage.docstore import SimpleDocumentStore\n'), ((1919, 1973), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (1953, 1973), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1991, 2044), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (2024, 2044), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((2111, 2167), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2147, 2167), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((2186, 2240), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2220, 2240), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((2258, 2311), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2291, 2311), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((3293, 3362), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', streaming=True)\n", (3303, 3362), False, 'from langchain.chat_models import ChatOpenAI\n')] |
# https://www.youtube.com/watch?v=oDzWsynpOyI
import logging
import sys
import os
from dotenv import load_dotenv
load_dotenv()
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
ServiceContext,
Document,
)
import json
import llama_index
from llama_index.llms import AzureOpenAI
from llama_index.node_parser import (
SentenceWindowNodeParser,
HierarchicalNodeParser,
get_leaf_nodes,
)
from llama_index.text_splitter import SentenceSplitter
from llama_index.embeddings import AzureOpenAIEmbedding, HuggingFaceEmbedding
from llama_index.schema import MetadataMode
from llama_index.postprocessor import (
MetadataReplacementPostProcessor,
SimilarityPostprocessor,
)
from llama_index import set_global_service_context
from llama_index.llms.types import ChatMessage
import chromadb
from llama_index.vector_stores import ChromaVectorStore
### THE LLM
api_key = os.getenv("AZURE_OPENAI_API_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
api_version = os.getenv("OPENAI_API_VERSION")
llm = AzureOpenAI(
engine="chat",
temperature=0.1,
api_key=api_key,
azure_endpoint=azure_endpoint,
api_version=api_version,
)
embed_model = AzureOpenAIEmbedding(
azure_deployment="embeddings",
api_key=api_key,
azure_endpoint=azure_endpoint,
api_version=api_version,
)
def _print_docs(docs):
# inspect documents
print("length of documents: ", str(len(docs)))
print("-----")
print(docs)
print("-----Metadata-----")
for doc in docs:
print(doc.metadata)
def _print_nodes(name, nodes):
print("-----" + name + "-----")
counter = 1
for node in nodes:
print(f"-----Node {counter}")
dict_node = dict(node)
print(dict_node)
counter += 1
print("-----")
def _create_text_qa_template():
from llama_index.llms import ChatMessage, MessageRole
from llama_index.prompts import ChatPromptTemplate
# Text QA Prompt
chat_text_qa_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
"You are an helpful chat assistant. You are here to help the user.Answer must be in the original language."
),
),
ChatMessage(
role=MessageRole.USER,
content=(
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge,"
"answer the question: {query_str}\n"
),
),
]
text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
return text_qa_template
def _create_refine_template():
from llama_index.llms import ChatMessage, MessageRole
from llama_index.prompts import ChatPromptTemplate
# Refine Prompt
chat_refine_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=("Always answer the question, even if the context isn't helpful."),
),
ChatMessage(
role=MessageRole.USER,
content=(
"We have the opportunity to refine the original answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question: {query_str}. "
"If the context isn't useful, output the original answer again.\n"
"Original Answer: {existing_answer}"
),
),
]
refine_template = ChatPromptTemplate(chat_refine_msgs)
return refine_template
def create_window_nodes(path="./sample-docs/"):
# get the file
documents = SimpleDirectoryReader(path).load_data()
# _print_docs(documents)
sentence_node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
window_nodes = sentence_node_parser.get_nodes_from_documents(documents)
# _print_nodes("WINDOW NODES", window_nodes)
return window_nodes
def create_base_nodes(path="./sample-docs/"):
# get the file
documents = SimpleDirectoryReader(path).load_data()
# _print_docs(documents)
base_node_parser = SentenceSplitter()
base_nodes = base_node_parser.get_nodes_from_documents(documents)
# _print_nodes("BASE NODES", base_nodes)
return base_nodes
def save_on_chroma_and_get_index(nodes, collection_name):
### CREATE THE VECTOR STORES
### SAVING VECTORS ON DISK
db = chromadb.PersistentClient(path="./chroma_db")
vector_collection = db.get_or_create_collection(collection_name)
vector_store = ChromaVectorStore(chroma_collection=vector_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
ctx = ServiceContext.from_defaults(
llm=llm, embed_model=embed_model, node_parser=nodes
)
index = VectorStoreIndex(
nodes, storage_context=storage_context, service_context=ctx
)
return index
def get_index(collection_name):
db2 = chromadb.PersistentClient(path="./chroma_db")
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=llm)
collection = db2.get_or_create_collection(collection_name)
vector_store = ChromaVectorStore(chroma_collection=collection)
index = VectorStoreIndex.from_vector_store(
vector_store,
service_context=service_context,
)
return index
def run_window_index_sample(question):
window_index = get_index("window-detrazioni")
text_qa_template = _create_text_qa_template()
refine_template = _create_refine_template()
window_query_engine = window_index.as_query_engine(
similarity_top_k=5,
verbose=True,
text_qa_template=text_qa_template,
# refine_template=refine_template,
        node_postprocessors=[
            MetadataReplacementPostProcessor(
                target_metadata_key="window",
            )
        ],
# node_postprocessors=[
# SimilarityPostprocessor(similarity_cutoff=0.7),
# MetadataReplacementPostProcessor(
# target_metadata_key="window",
# ),
# ],
)
base_response = window_query_engine.query(question)
print(base_response)
def run_base_index_sample(question):
base_index = get_index("base-detrazioni")
text_qa_template = _create_text_qa_template()
refine_template = _create_refine_template()
# Query engine
# base_query_engine = base_index.as_query_engine(
# verbose=True,
# text_qa_template=text_qa_template,
# # refine_template=refine_template,
# )
# chat engine
base_query_engine = base_index.as_chat_engine()
base_response = base_query_engine.chat(question)
print(base_response)
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# windows_node = create_window_nodes()
# window_index = save_on_chroma_and_get_index(windows_node, "window-detrazioni")
### INFERENCE
question = "question!!!"
# window_index = run_window_index_sample(question=question)
base_index = run_base_index_sample(question=question)
# ### TODO : TO INVESTIGATE
# ### SAVING INDEX DEFINITION ON DISK
# ### this is useful to avoid having to recreate the index every time so we can save money
# ### from embedding calls
# window_index.storage_context.persist(persist_dir="./window-indexes")
# base_index.storage_context.persist(persist_dir="./base-indexes")
# ### RELOAD INDEXES FROM DISK
# SC_retrieved_window = storage_context_window.from_defaults(
# persist_dir="./window-indexes"
# )
# SC_retrieved_base = storage_context_base.from_defaults(persist_dir="./base-indexes")
# retrieved_window_index = load_index_from_storage(SC_retrieved_window)
# retrieved_base_index = load_index_from_storage(SC_retrieved_base)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.llms.ChatMessage",
"llama_index.llms.AzureOpenAI",
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.text_splitter.SentenceSplitter"
] | [((116, 129), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (127, 129), False, 'from dotenv import load_dotenv\n'), ((963, 996), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_API_KEY"""'], {}), "('AZURE_OPENAI_API_KEY')\n", (972, 996), False, 'import os\n'), ((1014, 1048), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (1023, 1048), False, 'import os\n'), ((1063, 1094), 'os.getenv', 'os.getenv', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (1072, 1094), False, 'import os\n'), ((1102, 1223), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'engine': '"""chat"""', 'temperature': '(0.1)', 'api_key': 'api_key', 'azure_endpoint': 'azure_endpoint', 'api_version': 'api_version'}), "(engine='chat', temperature=0.1, api_key=api_key, azure_endpoint\n =azure_endpoint, api_version=api_version)\n", (1113, 1223), False, 'from llama_index.llms import AzureOpenAI\n'), ((1258, 1386), 'llama_index.embeddings.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'azure_deployment': '"""embeddings"""', 'api_key': 'api_key', 'azure_endpoint': 'azure_endpoint', 'api_version': 'api_version'}), "(azure_deployment='embeddings', api_key=api_key,\n azure_endpoint=azure_endpoint, api_version=api_version)\n", (1278, 1386), False, 'from llama_index.embeddings import AzureOpenAIEmbedding, HuggingFaceEmbedding\n'), ((2716, 2753), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['chat_text_qa_msgs'], {}), '(chat_text_qa_msgs)\n', (2734, 2753), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((3774, 3810), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['chat_refine_msgs'], {}), '(chat_refine_msgs)\n', (3792, 3810), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((4020, 4152), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (4058, 4152), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((4505, 4523), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4521, 4523), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4796, 4841), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (4821, 4841), False, 'import chromadb\n'), ((4931, 4985), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'vector_collection'}), '(chroma_collection=vector_collection)\n', (4948, 4985), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5008, 5063), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (5036, 5063), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5074, 5160), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'nodes'}), '(llm=llm, embed_model=embed_model, node_parser=\n nodes)\n', (5102, 5160), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, 
load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5183, 5260), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'ctx'}), '(nodes, storage_context=storage_context, service_context=ctx)\n', (5199, 5260), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5337, 5382), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5362, 5382), False, 'import chromadb\n'), ((5405, 5467), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (5433, 5467), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5551, 5598), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'collection'}), '(chroma_collection=collection)\n', (5568, 5598), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5611, 5697), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (5645, 5697), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((7100, 7158), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7119, 7158), False, 'import logging\n'), ((2067, 2230), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""You are an helpful chat assistant. You are here to help the user.Answer must be in the original language."""'}), "(role=MessageRole.SYSTEM, content=\n 'You are an helpful chat assistant. 
You are here to help the user.Answer must be in the original language.'\n )\n", (2078, 2230), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2297, 2532), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge,answer the question: {query_str}\n"""'}), '(role=MessageRole.USER, content=\n """Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge,answer the question: {query_str}\n"""\n )\n', (2308, 2532), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2984, 3099), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""Always answer the question, even if the context isn\'t helpful."""'}), '(role=MessageRole.SYSTEM, content=\n "Always answer the question, even if the context isn\'t helpful.")\n', (2995, 3099), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((3141, 3533), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""We have the opportunity to refine the original answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question: {query_str}. If the context isn\'t useful, output the original answer again.\nOriginal Answer: {existing_answer}"""'}), '(role=MessageRole.USER, content=\n """We have the opportunity to refine the original answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question: {query_str}. If the context isn\'t useful, output the original answer again.\nOriginal Answer: {existing_answer}"""\n )\n', (3152, 3533), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((7194, 7234), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7215, 7234), False, 'import logging\n'), ((3923, 3950), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (3944, 3950), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((4412, 4439), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (4433, 4439), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((6143, 6205), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (6175, 6205), False, 'from llama_index.postprocessor import MetadataReplacementPostProcessor, SimilarityPostprocessor\n'), ((7163, 7182), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7180, 7182), False, 'import logging\n')] |
from importlib import metadata
from pathlib import WindowsPath
from re import sub
from llama_index import (
ServiceContext,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
global_service_context,
)
import llama_index
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.schema import TextNode, MetadataMode
from llama_index.vector_stores import MilvusVectorStore
from llama_index.readers import SimpleDirectoryReader
from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser
from llama_index.postprocessor import (
SimilarityPostprocessor,
MetadataReplacementPostProcessor,
)
from milvus import default_server
import os
from typing import List, Dict, Any, Optional
from chatbot.common import DATA_PATH, EMBEDDING_DIM, EMBEDDING_MODEL, SIMILARITY_SEARCH_THRESHOLD, path_leaf, subjects, PathSep, debug
class AugmentedIngestPipeline:
def __init__(
self, data_dir_path: str, service_context: ServiceContext,
) -> None:
self.data_dir = data_dir_path
self.service_ctx = service_context
self.embed_model = self.service_ctx.embed_model
self.vector_indexes = {}
self.metadata_fn = lambda x: {"title": x.replace("_", " ")}
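        # Sentence-window parsing: each node keeps a 3-sentence window in its metadata for retrieval-time replacement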
self.node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
include_metadata=True,
)
self.create = False
def _load_data(self, path):
docs = SimpleDirectoryReader(
path, file_metadata=self.metadata_fn, filename_as_id=True
).load_data()
return docs
def _make_nodes(self, docs):
nodes = self.node_parser.get_nodes_from_documents(docs, show_progress=debug)
return nodes
def _insert_into_vectorstore(self, subject, nodes, create=False):
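        # Index the nodes into a per-subject Milvus collection, overwriting it when create=True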
collection_name = f"augmentED_{subject}"
vector_store = MilvusVectorStore(
dim=EMBEDDING_DIM,
host="127.0.0.1",
port=default_server.listen_port,
collection_name=collection_name,
overwrite=create,
)
storage_ctx = StorageContext.from_defaults(vector_store=vector_store)
self.vector_indexes[subject] = VectorStoreIndex(
nodes=nodes,
service_context=self.service_ctx,
storage_context=storage_ctx,
)
def _load_vectorstore(self, subject):
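        # Re-open an existing per-subject Milvus collection without overwriting it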
collection_name = f"augmentED_{subject}"
vector_store = MilvusVectorStore(
dim=EMBEDDING_DIM,
host="127.0.0.1",
port=default_server.listen_port,
collection_name=collection_name,
overwrite=False
)
storage_ctx = StorageContext.from_defaults(vector_store=vector_store)
self.vector_indexes[subject] = VectorStoreIndex.from_vector_store(
vector_store=vector_store,
service_context=self.service_ctx,
storage_context=storage_ctx,
)
def _get_subject_query_engine(self, subject):
query_engine = self.vector_indexes[subject].as_query_engine(
similarity_top_k=3,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD),
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
return query_engine
def _get_subject_chat_engine(self, subject):
query_engine = self.vector_indexes[subject].as_chat_engine(
mode="context",
similarity_top_k=2,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD),
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
return query_engine
def run_pipeline(self, create=False):
self.create = create
if self.create:
self.one_giant_index_nodes = []
self.all_docs = []
for subject in subjects:
path = self.data_dir + PathSep + subjects[subject]
docs = self._load_data(path)
nodes = self._make_nodes(docs)
self._insert_into_vectorstore(subject=subject, nodes=nodes)
self.one_giant_index_nodes.extend(nodes)
self.all_docs.extend(docs)
self._insert_into_vectorstore(
subject="OneGiantIndex", nodes=self.one_giant_index_nodes, create=self.create
)
else:
for subject in subjects:
self._load_vectorstore(subject)
self._load_vectorstore("OneGiantIndex")
self.one_giant_index = self.vector_indexes["OneGiantIndex"]
self.query_everything = self._get_subject_query_engine("OneGiantIndex")
def search_one_giant_index(
self,
query,
top_k=10,
replace_with_meta=True,
metadata_key="title",
):
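        # Retrieve the top_k most similar nodes from the combined index, optionally collapsing hits to their document titles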
retr = self.one_giant_index.as_retriever(
similarity_top_k=top_k,
)
answers = retr.retrieve(query)
if replace_with_meta:
return list(set(map(lambda x: x.metadata[metadata_key], answers)))
else:
return list(
map(lambda x: x.get_content(metadata_mode=MetadataMode.LLM), answers)
)
    def query_one_file(self, file_path):
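        # Index a user-uploaded file into its own collection (and into the combined index), returning a query engine over it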
docs = SimpleDirectoryReader(
input_files=[file_path], file_metadata=self.metadata_fn, filename_as_id=True
).load_data()
nodes = self._make_nodes(docs)
self._insert_into_vectorstore("UserUploadedDocument", nodes)
self._insert_into_vectorstore("OneGiantIndex", nodes)
return self._get_subject_query_engine("UserUploadedDocument")
class SimpleIngestPipeline:
def __init__(
self, data_dir_path: str, service_context: ServiceContext, create=False
) -> None:
self.data_dir = data_dir_path
self.service_ctx = service_context
self.embed_model = self.service_ctx.embed_model
self.vector_indexes = {}
self.metadata_fn = lambda x: {"title": path_leaf(x)}
self.node_parser = SimpleNodeParser(chunk_size=512)
self.create = create
def _load_data(self, path):
docs = SimpleDirectoryReader(
path, file_metadata=self.metadata_fn, filename_as_id=True
).load_data()
return docs
def _make_nodes(self, docs):
nodes = self.node_parser.get_nodes_from_documents(docs, show_progress=debug)
return nodes
def _insert_into_vectorstore(self, subject, nodes, create=False):
collection_name = f"augmentED_{subject}"
vector_store = MilvusVectorStore(
dim=EMBEDDING_DIM,
host="127.0.0.1",
port=default_server.listen_port,
collection_name=collection_name,
overwrite=create,
)
storage_ctx = StorageContext.from_defaults(vector_store=vector_store)
self.vector_indexes[subject] = VectorStoreIndex(
nodes=nodes,
service_context=self.service_ctx,
storage_context=storage_ctx,
)
def _load_vectorstore(self, subject):
collection_name = f"augmentED_{subject}"
vector_store = MilvusVectorStore(
dim=EMBEDDING_DIM,
host="127.0.0.1",
port=default_server.listen_port,
collection_name=collection_name,
overwrite=False
)
storage_ctx = StorageContext.from_defaults(vector_store=vector_store)
self.vector_indexes[subject] = VectorStoreIndex.from_vector_store(
vector_store=vector_store,
service_context=self.service_ctx,
storage_context=storage_ctx,
)
    def _get_subject_query_engine(self, subject):
query_engine = self.vector_indexes[subject].as_query_engine(
similarity_top_k=3,
node_postprocessors=[
MetadataReplacementPostProcessor(target_metadata_key="window")
],
)
return query_engine
def run_pipeline(self):
if self.create:
self.one_giant_index_nodes = []
self.all_docs = []
for subject in subjects:
path = self.data_dir + PathSep + subjects[subject]
docs = self._load_data(path)
nodes = self._make_nodes(docs)
self._insert_into_vectorstore(subject=subject, nodes=nodes)
self.one_giant_index_nodes.extend(nodes)
self.all_docs.extend(docs)
self._insert_into_vectorstore(
subject="OneGiantIndex", nodes=self.one_giant_index_nodes, create=self.create
)
else:
for subject in subjects:
self._load_vectorstore(subject)
self._load_vectorstore("OneGiantIndex")
self.one_giant_index = self.vector_indexes["OneGiantIndex"]
if __name__ == "__main__":
pipe = AugmentedIngestPipeline(
data_dir_path=DATA_PATH,
service_context=ServiceContext.from_defaults(
llm=None, embed_model=HuggingFaceEmbedding(EMBEDDING_MODEL)
),
)
pipe.run_pipeline(create=True)
| [
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.StorageContext.from_defaults",
"llama_index.readers.SimpleDirectoryReader",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.postprocessor.SimilarityPostprocessor",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((1300, 1460), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""', 'include_metadata': '(True)'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text', include_metadata=True\n )\n", (1338, 1460), False, 'from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser\n'), ((2004, 2147), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': 'create'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=create)\n", (2021, 2147), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((2237, 2292), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2265, 2292), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2333, 2429), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(nodes=nodes, service_context=self.service_ctx,\n storage_context=storage_ctx)\n', (2349, 2429), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2588, 2730), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': '(False)'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=False)\n", (2605, 2730), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((2819, 2874), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2847, 2874), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2915, 3043), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(vector_store=vector_store,\n service_context=self.service_ctx, storage_context=storage_ctx)\n', (2949, 3043), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((6295, 6327), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'chunk_size': '(512)'}), '(chunk_size=512)\n', (6311, 6327), False, 'from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser\n'), ((6823, 6966), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': 'create'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=create)\n", 
(6840, 6966), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7056, 7111), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7084, 7111), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7152, 7248), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(nodes=nodes, service_context=self.service_ctx,\n storage_context=storage_ctx)\n', (7168, 7248), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7407, 7549), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': '(False)'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=False)\n", (7424, 7549), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7638, 7693), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7666, 7693), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7734, 7862), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(vector_store=vector_store,\n service_context=self.service_ctx, storage_context=storage_ctx)\n', (7768, 7862), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((1586, 1671), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(path, file_metadata=self.metadata_fn, filename_as_id=True\n )\n', (1607, 1671), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((5516, 5620), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(input_files=[file_path], file_metadata=self.\n metadata_fn, filename_as_id=True)\n', (5537, 5620), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((6254, 6266), 'chatbot.common.path_leaf', 'path_leaf', (['x'], {}), '(x)\n', (6263, 6266), False, 'from chatbot.common import DATA_PATH, EMBEDDING_DIM, EMBEDDING_MODEL, SIMILARITY_SEARCH_THRESHOLD, path_leaf, subjects, PathSep, debug\n'), ((6405, 6490), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(path, file_metadata=self.metadata_fn, filename_as_id=True\n )\n', (6426, 6490), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((3289, 3359), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_SEARCH_THRESHOLD'}), '(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD)\n', (3312, 3359), False, 'from llama_index.postprocessor import SimilarityPostprocessor, 
MetadataReplacementPostProcessor\n'), ((3377, 3439), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3409, 3439), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((3721, 3791), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_SEARCH_THRESHOLD'}), '(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD)\n', (3744, 3791), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((3809, 3871), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3841, 3871), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((8117, 8179), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (8149, 8179), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((9294, 9331), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', (['EMBEDDING_MODEL'], {}), '(EMBEDDING_MODEL)\n', (9314, 9331), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n')] |
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
import nest_asyncio
nest_asyncio.apply()
from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker
from llama_index.core.llama_pack import download_llama_pack
download_llama_pack(
"LLMCompilerAgentPack",
"./agent_pack",
skip_load=True,
)
from agent_pack.step import LLMCompilerAgentWorker
import json
from typing import Sequence, List
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the resulting integer."""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
    """Add two integers and return the resulting integer."""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
tools = [multiply_tool, add_tool]
multiply_tool.metadata.fn_schema_str
from llama_index.core.agent import AgentRunner
llm = OpenAI(model="gpt-4")
callback_manager = llm.callback_manager
agent_worker = LLMCompilerAgentWorker.from_tools(
tools, llm=llm, verbose=True, callback_manager=callback_manager
)
agent = AgentRunner(agent_worker, callback_manager=callback_manager)
response = agent.chat("What is (121 * 3) + 42?")
response
agent.memory.get_all()
get_ipython().system('pip install llama-index-readers-wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"]
city_docs = {}
reader = WikipediaReader()
for wiki_title in wiki_titles:
docs = reader.load_data(pages=[wiki_title])
city_docs[wiki_title] = docs
from llama_index.core import ServiceContext
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import CallbackManager
llm = OpenAI(temperature=0, model="gpt-4")
service_context = ServiceContext.from_defaults(llm=llm)
callback_manager = CallbackManager([])
from llama_index.core import load_index_from_storage, StorageContext
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core import VectorStoreIndex
import os
node_parser = SentenceSplitter()
query_engine_tools = []
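# Build a persisted vector index per city (or reload it from disk) and wrap each one as a query-engine tool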
for idx, wiki_title in enumerate(wiki_titles):
nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title])
if not os.path.exists(f"./data/{wiki_title}"):
vector_index = VectorStoreIndex(
nodes, service_context=service_context, callback_manager=callback_manager
)
vector_index.storage_context.persist(persist_dir=f"./data/{wiki_title}")
else:
vector_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=f"./data/{wiki_title}"),
service_context=service_context,
callback_manager=callback_manager,
)
vector_query_engine = vector_index.as_query_engine()
query_engine_tools.append(
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name=f"vector_tool_{wiki_title}",
description=(
"Useful for questions related to specific aspects of"
f" {wiki_title} (e.g. the history, arts and culture,"
" sports, demographics, or more)."
),
),
)
)
from llama_index.core.agent import AgentRunner
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4")
agent_worker = LLMCompilerAgentWorker.from_tools(
query_engine_tools,
llm=llm,
verbose=True,
callback_manager=callback_manager,
)
agent = AgentRunner(agent_worker, callback_manager=callback_manager)
response = agent.chat(
"Tell me about the demographics of Miami, and compare that with the demographics of Chicago?"
)
print(str(response))
response = agent.chat(
"Is the climate of Chicago or Seattle better during the wintertime?"
)
print(str(response))
| [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.agent.AgentRunner",
"llama_index.core.tools.ToolMetadata",
"llama_index.llms.openai.OpenAI",
"llama_index.core.VectorStoreIndex",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.tools.FunctionTool.from_defaults",
"llama_index.readers.wikipedia.WikipediaReader"
] | [((266, 281), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (279, 281), True, 'import phoenix as px\n'), ((385, 405), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (403, 405), False, 'import nest_asyncio\n'), ((549, 624), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""LLMCompilerAgentPack"""', '"""./agent_pack"""'], {'skip_load': '(True)'}), "('LLMCompilerAgentPack', './agent_pack', skip_load=True)\n", (568, 624), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((910, 930), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (928, 930), False, 'import nest_asyncio\n'), ((1069, 1108), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'multiply'}), '(fn=multiply)\n', (1095, 1108), False, 'from llama_index.core.tools import BaseTool, FunctionTool\n'), ((1231, 1265), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'add'}), '(fn=add)\n', (1257, 1265), False, 'from llama_index.core.tools import BaseTool, FunctionTool\n'), ((1398, 1419), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""'}), "(model='gpt-4')\n", (1404, 1419), False, 'from llama_index.llms.openai import OpenAI\n'), ((1479, 1581), 'agent_pack.step.LLMCompilerAgentWorker.from_tools', 'LLMCompilerAgentWorker.from_tools', (['tools'], {'llm': 'llm', 'verbose': '(True)', 'callback_manager': 'callback_manager'}), '(tools, llm=llm, verbose=True,\n callback_manager=callback_manager)\n', (1512, 1581), False, 'from agent_pack.step import LLMCompilerAgentWorker\n'), ((1592, 1652), 'llama_index.core.agent.AgentRunner', 'AgentRunner', (['agent_worker'], {'callback_manager': 'callback_manager'}), '(agent_worker, callback_manager=callback_manager)\n', (1603, 1652), False, 'from llama_index.core.agent import AgentRunner\n'), ((1966, 1983), 'llama_index.readers.wikipedia.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (1981, 1983), False, 'from llama_index.readers.wikipedia import WikipediaReader\n'), ((2248, 2284), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (2254, 2284), False, 'from llama_index.llms.openai import OpenAI\n'), ((2303, 2340), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (2331, 2340), False, 'from llama_index.core import ServiceContext\n'), ((2360, 2379), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2375, 2379), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((2646, 2664), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (2662, 2664), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((3946, 3967), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""'}), "(model='gpt-4')\n", (3952, 3967), False, 'from llama_index.llms.openai import OpenAI\n'), ((3983, 4098), 'agent_pack.step.LLMCompilerAgentWorker.from_tools', 'LLMCompilerAgentWorker.from_tools', (['query_engine_tools'], {'llm': 'llm', 'verbose': '(True)', 'callback_manager': 'callback_manager'}), '(query_engine_tools, llm=llm, verbose=True,\n callback_manager=callback_manager)\n', (4016, 4098), False, 'from agent_pack.step import LLMCompilerAgentWorker\n'), ((4122, 4182), 'llama_index.core.agent.AgentRunner', 'AgentRunner', (['agent_worker'], {'callback_manager': 
'callback_manager'}), '(agent_worker, callback_manager=callback_manager)\n', (4133, 4182), False, 'from llama_index.core.agent import AgentRunner\n'), ((2822, 2860), 'os.path.exists', 'os.path.exists', (['f"""./data/{wiki_title}"""'], {}), "(f'./data/{wiki_title}')\n", (2836, 2860), False, 'import os\n'), ((2885, 2981), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context', 'callback_manager': 'callback_manager'}), '(nodes, service_context=service_context, callback_manager=\n callback_manager)\n', (2901, 2981), False, 'from llama_index.core import VectorStoreIndex\n'), ((3150, 3214), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'f"""./data/{wiki_title}"""'}), "(persist_dir=f'./data/{wiki_title}')\n", (3178, 3214), False, 'from llama_index.core import load_index_from_storage, StorageContext\n'), ((3499, 3705), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""vector_tool_{wiki_title}"""', 'description': 'f"""Useful for questions related to specific aspects of {wiki_title} (e.g. the history, arts and culture, sports, demographics, or more)."""'}), "(name=f'vector_tool_{wiki_title}', description=\n f'Useful for questions related to specific aspects of {wiki_title} (e.g. the history, arts and culture, sports, demographics, or more).'\n )\n", (3511, 3705), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n')] |
import os
import hashlib
from threading import Thread
from pathlib import Path
#import llama_index
from openai import OpenAI
import constants as c
from llama_index import StorageContext, VectorStoreIndex, Document
from llama_index.node_parser import SimpleNodeParser
from llama_index import SimpleDirectoryReader
c.Get_API()
client = OpenAI()
newdocspath = ""
masterpath = ""
basepath = ""
persistpath = ""
indexpath = ""
class Document:
__slots__ = ['text', 'doc_id', 'id_', 'hash']
def __init__(self, text: str, doc_id: str):
self.text = text
self.doc_id = doc_id
self.id_ = doc_id
self.hash = self.generate_hash(text)
def generate_hash(self, text: str) -> str:
return hashlib.sha256(text.encode()).hexdigest()
def get_metadata_str(self, mode=None) -> str:
return f"{self.doc_id}-{self.hash}"
def get_content(self, metadata_mode=None) -> str:
return self.text
def index_document(doc: Document):
print("index_document reached")
    # Build an index and insert the single document into it
    index = VectorStoreIndex(nodes=[])
    index.insert(doc)
    print("index_document complete")
def CreateUpdate_Index(basepath, masterdocs, newdocs, indexpath, action, tool ):
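    # Create a fresh index when the tool's index directory is empty, otherwise update the existing one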
print('Create/Update function running')
# Check if index path directory is empty
main_dir = "."
indexes_dir = os.path.join(main_dir, "Indexes")
chkindexpath = os.path.join(indexes_dir, tool)
print('ckindexpath', chkindexpath)
index_dir = Path(chkindexpath)
print('index_dir',index_dir)
    is_empty = len(os.listdir(index_dir)) == 0
print('is empty', is_empty)
if is_empty:
print('Running creating index function')
print(basepath, masterdocs, newdocs, index_dir, tool)
Create_Index(basepath, masterdocs, newdocs, index_dir, tool )
else:
print('Running updating index function')
Update_Index(basepath, masterdocs, newdocs, index_dir)
# print('Running creating index function')
# print(basepath, masterdocs, newdocs, index_dir, tool)
# Create_Index(basepath, masterdocs, newdocs, index_dir, tool )
def Create_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str, tool: str):
print('Creating index')
# Load documents
docpath = masterdocs
documents = SimpleDirectoryReader(input_dir=docpath).load_data()
# Parse documents into nodes
parser = SimpleNodeParser.from_defaults()
nodes = parser.get_nodes_from_documents(documents)
# Create index using nodes
index = VectorStoreIndex(nodes=nodes)
for doc in documents:
index.insert(doc)
# Persist index
persist_path = os.path.join(basepath, indexpath)
print('persist_path= ', persist_path)
saveindexpath = persist_path
index.storage_context.persist(saveindexpath)
print('Index created and saved')
# def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str):
# print("update index reached")
# from llama_index import load_index_from_storage, Document
# print('update_index indexpath', indexpath)
#
# try:
# storage_context = StorageContext.from_defaults(persist_dir=indexpath)
# new_index = load_index_from_storage(storage_context)
# new_docs_dir = os.path.join(basepath, newdocs)
# is_empty = len(os.listdir(newdocs)) == 0
# if not is_empty:
# for filename in os.listdir(new_docs_dir):
# path = os.path.join(new_docs_dir, filename)
# with open(path) as f:
# # Create document
# text = f.read()
# doc = Document(text, filename)
# new_index.insert(doc)
# storage_context.persist(new_index)
# print("Update index completed")
# except Exception as e:
# print(e)
def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str):
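    # Load the persisted index, insert any documents found in newdocs, and persist the updated index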
# Loading from disk
from llama_index import StorageContext, load_index_from_storage
from llama_index import PromptHelper, LLMPredictor, ServiceContext
import openai
openai.api_key = c.Get_API()
    is_empty = len(os.listdir(newdocs)) == 0
if not is_empty:
storage_context = StorageContext.from_defaults(persist_dir=indexpath)
index = load_index_from_storage(storage_context)
new_docs_dir = os.path.join(basepath, newdocs)
        llm_predictor = LLMPredictor(llm=openai)
max_input_size = 4096
num_outputs = 5000
max_chunk_overlap = 0.5
chunk_size_limit = 3900
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
reader = SimpleDirectoryReader(new_docs_dir)
documents = reader.load_data()
        persist_path = os.path.join(basepath, indexpath)
for d in documents:
index.insert(document = d, service_context = service_context)
print(persist_path)
storage_context.persist(persist_dir = persist_path)
else:
print('no new docs')
def AskBuild(tool, choice):
print("AskBuild reached : ", tool, choice)
if choice == 'build':
print("Askbuild build reached")
main_dir = "."
#train_dir = os.path.join(main_dir, "MyAI_Training_Docs")
train_dir = ".//MyAI_Training_Docs//"
train_path = os.path.join(train_dir, tool)
master_dir = os.path.join(train_path, "Master")
persistpath = 'Indexes//' + tool + '//'
if tool == 'ai':
doc_path = "ai"
elif tool == 'gn':
doc_path = "gn"
newdocspath = train_path + "//Docs"
masterpath = train_path + "//Master"
print(tool, choice)
print("PP: ", persistpath)
print("nd: ", newdocspath)
print("mp: ", masterpath)
#print("bp: ", basepath)
basepath = ""
CreateUpdate_Index(basepath, masterpath, newdocspath, persistpath, choice, tool)
print("Askbuild gn complete")
elif choice == 'ask':
print("Askbuild ask reached")
persistpath = 'Indexes//'
newdocspath = 'Docs'
masterpath = 'Master'
main_dir = "."
basepath = os.path.join(main_dir, tool)
indexpath = main_dir + '//Indexes//' + tool + '//'
AskQuestion(indexpath, persistpath)
print("Ask build ask complete")
else:
pass
def AskQuestion(topic: str, action: str, question: str):
from llama_index import load_index_from_storage
print(topic)
print("Ask question reached")
indexpath = './/Indexes//' + topic + '//'
print('indexpath= ', indexpath)
print(os.listdir(indexpath))
storage_context = StorageContext.from_defaults(persist_dir=indexpath)
new_index = load_index_from_storage(storage_context)
new_query_engine = new_index.as_query_engine()
    # Single-shot query: the question arrives as a parameter, so no input loop is needed
    response = None
    if question.lower() != "exit":
        response = new_query_engine.query(question)
        print(response)
print("AskQuestion complete")
return response
#AskBuild('gn', 'build')
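# Example usage (hypothetical question text; assumes training docs exist under .//MyAI_Training_Docs//<tool>//):
# AskBuild('gn', 'build')
# AskQuestion('gn', 'ask', 'What do the indexed documents say about onboarding?')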
| [
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.PromptHelper",
"llama_index.VectorStoreIndex",
"llama_index.load_index_from_storage"
] | [((314, 325), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (323, 325), True, 'import constants as c\n'), ((335, 343), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (341, 343), False, 'from openai import OpenAI\n'), ((1027, 1045), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (1043, 1045), False, 'from llama_index import StorageContext, VectorStoreIndex, Document\n'), ((1321, 1354), 'os.path.join', 'os.path.join', (['main_dir', '"""Indexes"""'], {}), "(main_dir, 'Indexes')\n", (1333, 1354), False, 'import os\n'), ((1374, 1405), 'os.path.join', 'os.path.join', (['indexes_dir', 'tool'], {}), '(indexes_dir, tool)\n', (1386, 1405), False, 'import os\n'), ((1461, 1479), 'pathlib.Path', 'Path', (['chkindexpath'], {}), '(chkindexpath)\n', (1465, 1479), False, 'from pathlib import Path\n'), ((2370, 2402), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {}), '()\n', (2400, 2402), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2502, 2531), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (2518, 2531), False, 'from llama_index import StorageContext, VectorStoreIndex, Document\n'), ((2624, 2657), 'os.path.join', 'os.path.join', (['basepath', 'indexpath'], {}), '(basepath, indexpath)\n', (2636, 2657), False, 'import os\n'), ((4102, 4113), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (4111, 4113), True, 'import constants as c\n'), ((6806, 6857), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'indexpath'}), '(persist_dir=indexpath)\n', (6834, 6857), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((6874, 6914), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (6897, 6914), False, 'from llama_index import load_index_from_storage\n'), ((4207, 4258), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'indexpath'}), '(persist_dir=indexpath)\n', (4235, 4258), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((4275, 4315), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4298, 4315), False, 'from llama_index import load_index_from_storage\n'), ((4339, 4370), 'os.path.join', 'os.path.join', (['basepath', 'newdocs'], {}), '(basepath, newdocs)\n', (4351, 4370), False, 'import os\n'), ((4394, 4418), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'openai'}), '(llm=openai)\n', (4406, 4418), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4564, 4663), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (4576, 4663), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4687, 4778), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (4715, 4778), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4792, 4827), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['new_docs_dir'], {}), '(new_docs_dir)\n', (4813, 4827), False, 
'from llama_index import SimpleDirectoryReader\n'), ((4905, 4938), 'os.path.join', 'os.path.join', (['basepath', 'indexpath'], {}), '(basepath, indexpath)\n', (4917, 4938), False, 'import os\n'), ((5468, 5497), 'os.path.join', 'os.path.join', (['train_dir', 'tool'], {}), '(train_dir, tool)\n', (5480, 5497), False, 'import os\n'), ((5519, 5553), 'os.path.join', 'os.path.join', (['train_path', '"""Master"""'], {}), "(train_path, 'Master')\n", (5531, 5553), False, 'import os\n'), ((6761, 6782), 'os.listdir', 'os.listdir', (['indexpath'], {}), '(indexpath)\n', (6771, 6782), False, 'import os\n'), ((1531, 1552), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1541, 1552), False, 'import os\n'), ((2270, 2310), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'docpath'}), '(input_dir=docpath)\n', (2291, 2310), False, 'from llama_index import SimpleDirectoryReader\n'), ((4133, 4152), 'os.listdir', 'os.listdir', (['newdocs'], {}), '(newdocs)\n', (4143, 4152), False, 'import os\n'), ((6312, 6340), 'os.path.join', 'os.path.join', (['main_dir', 'tool'], {}), '(main_dir, tool)\n', (6324, 6340), False, 'import os\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.core.embeddings.base import BaseEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.llm import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
| [
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.extractors.loading.load_extractor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.embeddings.utils.resolve_embed_model",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.llm_predictor.loading.load_predictor"
] | [((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((1764, 1821), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1794, 1821), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5128, 5156), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5132, 5156), False, 'from typing import Any, List, Optional, cast\n'), ((7575, 7607), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7594, 7607), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10019, 10047), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10023, 10047), False, 'from typing import Any, List, Optional, cast\n'), ((11263, 11295), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11282, 11295), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14437, 14487), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14451, 14487), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14511, 14561), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14527, 14561), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14587, 14645), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14609, 14645), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6319, 6338), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6334, 6338), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6506, 6522), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6517, 6522), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6954, 7020), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6966, 7020), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8483, 8496), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8494, 8496), False, 'from llama_index.logger import LlamaLogger\n'), ((10558, 10574), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10569, 10574), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10603, 10624), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10615, 10624), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1363, 1380), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1378, 1380), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14821, 14843), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14832, 14843), False, 'from 
llama_index.node_parser.loading import load_parser\n'), ((14915, 14940), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14929, 14940), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm')
get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference')
import hashlib
import json
from pathlib import Path
import os
import textwrap
from typing import List, Union
import llama_index.core
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.callbacks.openinference.base import (
as_dataframe,
QueryData,
NodeData,
)
from llama_index.core.node_parser import SimpleNodeParser
import pandas as pd
from tqdm import tqdm
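# Load the Paul Graham essay directly from the raw GitHub URL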
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
]
)
print(documents[0].text)
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
print(nodes[0].text)
callback_handler = OpenInferenceCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llama_index.core.Settings.callback_manager = callback_manager
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
max_characters_per_line = 80
queries = [
"What did Paul Graham do growing up?",
"When and how did Paul Graham's mother die?",
"What, in Paul Graham's opinion, is the most distinctive thing about YC?",
"When and how did Paul Graham meet Jessica Livingston?",
"What is Bel, and when and where was it written?",
]
for query in queries:
response = query_engine.query(query)
print("Query")
print("=====")
print(textwrap.fill(query, max_characters_per_line))
print()
print("Response")
print("========")
print(textwrap.fill(str(response), max_characters_per_line))
print()
query_data_buffer = callback_handler.flush_query_data_buffer()
query_dataframe = as_dataframe(query_data_buffer)
query_dataframe
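# Callback that flushes buffered query data to Parquet files once the buffer reaches max_buffer_length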
class ParquetCallback:
def __init__(
self, data_path: Union[str, Path], max_buffer_length: int = 1000
):
self._data_path = Path(data_path)
self._data_path.mkdir(parents=True, exist_ok=False)
self._max_buffer_length = max_buffer_length
self._batch_index = 0
def __call__(
self,
query_data_buffer: List[QueryData],
node_data_buffer: List[NodeData],
) -> None:
if len(query_data_buffer) >= self._max_buffer_length:
query_dataframe = as_dataframe(query_data_buffer)
file_path = self._data_path / f"log-{self._batch_index}.parquet"
query_dataframe.to_parquet(file_path)
self._batch_index += 1
query_data_buffer.clear() # ⚠️ clear the buffer or it will keep growing forever!
node_data_buffer.clear() # didn't log node_data_buffer, but still need to clear it
data_path = "data"
parquet_writer = ParquetCallback(
data_path=data_path,
max_buffer_length=1,
)
callback_handler = OpenInferenceCallbackHandler(callback=parquet_writer)
callback_manager = CallbackManager([callback_handler])
llama_index.core.Settings.callback_manager = callback_manager
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
for query in tqdm(queries):
query_engine.query(query)
query_dataframes = []
for file_name in os.listdir(data_path):
file_path = os.path.join(data_path, file_name)
query_dataframes.append(pd.read_parquet(file_path))
query_dataframe = pd.concat(query_dataframes)
query_dataframe | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.callbacks.openinference.OpenInferenceCallbackHandler",
"llama_index.readers.web.SimpleWebPageReader",
"llama_index.callbacks.openinference.base.as_dataframe"
] | [((1483, 1501), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (1499, 1501), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((1596, 1626), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '()\n', (1624, 1626), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((1646, 1681), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[callback_handler]'], {}), '([callback_handler])\n', (1661, 1681), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((1755, 1797), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1786, 1797), False, 'from llama_index.core import VectorStoreIndex\n'), ((2546, 2577), 'llama_index.callbacks.openinference.base.as_dataframe', 'as_dataframe', (['query_data_buffer'], {}), '(query_data_buffer)\n', (2558, 2577), False, 'from llama_index.callbacks.openinference.base import as_dataframe, QueryData, NodeData\n'), ((3641, 3694), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {'callback': 'parquet_writer'}), '(callback=parquet_writer)\n', (3669, 3694), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((3714, 3749), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[callback_handler]'], {}), '([callback_handler])\n', (3729, 3749), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((3821, 3863), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (3852, 3863), False, 'from llama_index.core import VectorStoreIndex\n'), ((3917, 3930), 'tqdm.tqdm', 'tqdm', (['queries'], {}), '(queries)\n', (3921, 3930), False, 'from tqdm import tqdm\n'), ((4004, 4025), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (4014, 4025), False, 'import os\n'), ((4152, 4179), 'pandas.concat', 'pd.concat', (['query_dataframes'], {}), '(query_dataframes)\n', (4161, 4179), True, 'import pandas as pd\n'), ((42, 69), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (51, 69), False, 'import os\n'), ((114, 209), 'getpass.getpass', 'getpass', (['"""Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n"""'], {}), "(\n 'Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n'\n )\n", (121, 209), False, 'from getpass import getpass\n'), ((4043, 4077), 'os.path.join', 'os.path.join', (['data_path', 'file_name'], {}), '(data_path, file_name)\n', (4055, 4077), False, 'import os\n'), ((232, 263), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (241, 263), False, 'import os\n'), ((1275, 1296), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {}), '()\n', (1294, 1296), False, 'from llama_index.readers.web import SimpleWebPageReader\n'), ((2282, 2327), 'textwrap.fill', 'textwrap.fill', (['query', 'max_characters_per_line'], {}), '(query, max_characters_per_line)\n', (2295, 2327), False, 'import textwrap\n'), ((2745, 2760), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (2749, 2760), False, 'from pathlib import Path\n'), ((4106, 4132), 'pandas.read_parquet', 'pd.read_parquet', (['file_path'], {}), '(file_path)\n', (4121, 4132), True, 'import pandas as pd\n'), ((3129, 3160), 
'llama_index.callbacks.openinference.base.as_dataframe', 'as_dataframe', (['query_data_buffer'], {}), '(query_data_buffer)\n', (3141, 3160), False, 'from llama_index.callbacks.openinference.base import as_dataframe, QueryData, NodeData\n')] |
import os
import hashlib
from threading import Thread
from pathlib import Path
from llama_index import StorageContext, VectorStoreIndex, load_index_from_storage
from openai import OpenAI
import constants as c
c.Get_API()
client = OpenAI()
newdocspath = ""
masterpath = ""
basepath = ""
persistpath = ""
# test
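# Lightweight stand-in that mimics the llama_index Document interface (text, ids, hash, content accessors)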
class Document:
__slots__ = ['text', 'doc_id', 'id_', 'hash']
def __init__(self, text: str, doc_id: str):
self.text = text
self.doc_id = doc_id
self.id_ = doc_id
self.hash = self.generate_hash(text)
def generate_hash(self, text: str) -> str:
return hashlib.sha256(text.encode()).hexdigest()
def get_metadata_str(self, mode=None) -> str:
return f"{self.doc_id}-{self.hash}"
def get_content(self, metadata_mode=None) -> str:
return self.text
def index_document(doc: Document):
    print("index_document reached")
    # Build a small throwaway index around the single document and insert it
    index = VectorStoreIndex(nodes=[])
    index.insert(doc)
    print("index document complete")
def CreateUpdate_Index(basepath, masterdocs, newdocs, indexpath, action, tool ):
print('Create/Update function running')
    # Check whether the index directory is empty: build a fresh index if so, otherwise update it
    chkindexpath = "Z:\\MyChatBot_v1.0\\" + tool + "\\index\\"
    print(chkindexpath)
    index_dir = Path(chkindexpath)
    is_empty = len(os.listdir(index_dir)) == 0
    if is_empty:
        print('Running creating index function')
        Create_Index(basepath, masterdocs, newdocs, indexpath, tool)
    else:
        print('Running updating index function')
        Update_Index(basepath, masterdocs, newdocs, indexpath)
def Create_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str, tool):
print('Creating index')
from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document
# Specify the input_dir path
docpath = masterdocs
documents = SimpleDirectoryReader(input_dir=docpath).load_data()
# Create an index from the documents
index = VectorStoreIndex.from_documents(documents)
# Persist index to disk
saveindexpath = basepath + indexpath
index.storage_context.persist(saveindexpath)
print('Index created and saved')
docs_dir = os.path.join("Z:\\MyAI_Training_Docs\\", tool, "_Training_Docs\\docs")
doc_paths = Path(docs_dir).glob("*")
    # Index each raw training doc in its own worker thread (see index_document above)
threads = []
for path in doc_paths:
with open(path) as f:
text = f.read()
doc = Document(text, path.name)
thread = Thread(target=index_document, args=(doc,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
    # Re-persist the main index after the per-document worker threads finish
    index.storage_context.persist(persist_dir=saveindexpath)
print("Create index complete")
def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str):
print("update index reached")
    # StorageContext and load_index_from_storage are imported at module level
    storage_context = StorageContext.from_defaults(persist_dir=os.path.join(basepath, indexpath))
    index = load_index_from_storage(storage_context)
new_docs_dir = os.path.join(basepath, newdocs)
for filename in os.listdir(new_docs_dir):
path = os.path.join(new_docs_dir, filename)
with open(path) as f:
text = f.read()
doc = Document(text, filename)
        index.insert(doc)
    index.storage_context.persist(persist_dir=os.path.join(basepath, indexpath))
print("Update index completed")
def AskBuild(tool, choice):
print("AskBuild reached : ", tool, choice)
if choice == 'build':
print("Askbuild build reached")
basepath = 'Z:\\MyAI_Training_Docs\\'
persistpath = 'Index\\Index\\'
if tool == 'ai':
doc_path = "AI"
elif tool == 'gn':
doc_path = "GN"
newdocspath = basepath + doc_path + "_Training_Docs\\Docs"
masterpath = basepath + doc_path + "_Training_Docs\\Master"
print(tool, choice)
print("PP: ", persistpath)
print("nd: ", newdocspath)
print("mp: ", masterpath)
print("bp: ", basepath)
CreateUpdate_Index(basepath, masterpath, newdocspath, persistpath, choice, tool)
print("Askbuild GN complete")
elif choice == 'ask':
print("Askbuild ask reached")
persistpath = 'Index\\Index\\'
newdocspath = 'Docs'
masterpath = 'Master'
basepath = 'Z:\\MyChatBot_v1.0\\' + tool + '\\'
AskQuestion(basepath, persistpath)
print("Ask build ask complete")
else:
pass
def AskQuestion(basepath: str, indexpath: str):
    print("Ask question reached")
    storage_context = StorageContext.from_defaults(persist_dir=os.path.join(basepath, indexpath))
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine()
while True:
question = input("Enter question: ")
if question.lower() == "exit":
break
        response = query_engine.query(question)
print(response)
print("AskQuestion complete") | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.StorageContext",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.load_index_from_storage",
"llama_index.Document"
] | [((147, 158), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (156, 158), True, 'import constants as c\n'), ((168, 176), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (174, 176), False, 'from openai import OpenAI\n'), ((854, 872), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (870, 872), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2052, 2094), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2083, 2094), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2268, 2338), 'os.path.join', 'os.path.join', (['"""Z:\\\\MyAI_Training_Docs\\\\"""', 'tool', '"""_Training_Docs\\\\docs"""'], {}), "('Z:\\\\MyAI_Training_Docs\\\\', tool, '_Training_Docs\\\\docs')\n", (2280, 2338), False, 'import os\n'), ((2462, 2491), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (2478, 2491), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2822, 2856), 'llama_index.StorageContext', 'StorageContext', ([], {'indexdir': 'indexpath'}), '(indexdir=indexpath)\n', (2836, 2856), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((3131, 3170), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', (['indexpath'], {}), '(indexpath)\n', (3159, 3170), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((3183, 3223), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (3206, 3223), False, 'from llama_index import load_index_from_storage\n'), ((3244, 3275), 'os.path.join', 'os.path.join', (['basepath', 'newdocs'], {}), '(basepath, newdocs)\n', (3256, 3275), False, 'import os\n'), ((3296, 3320), 'os.listdir', 'os.listdir', (['new_docs_dir'], {}), '(new_docs_dir)\n', (3306, 3320), False, 'import os\n'), ((4751, 4790), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', (['indexpath'], {}), '(indexpath)\n', (4779, 4790), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((4803, 4843), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4826, 4843), False, 'from llama_index import load_index_from_storage\n'), ((1282, 1300), 'pathlib.Path', 'Path', (['chkindexpath'], {}), '(chkindexpath)\n', (1286, 1300), False, 'from pathlib import Path\n'), ((2609, 2634), 'llama_index.Document', 'Document', (['text', 'path.name'], {}), '(text, path.name)\n', (2617, 2634), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2652, 2694), 'threading.Thread', 'Thread', ([], {'target': 'index_document', 'args': '(doc,)'}), '(target=index_document, args=(doc,))\n', (2658, 2694), False, 'from threading import Thread\n'), ((3337, 3373), 'os.path.join', 'os.path.join', (['new_docs_dir', 'filename'], {}), '(new_docs_dir, filename)\n', (3349, 3373), False, 'import os\n'), ((3446, 3470), 'llama_index.Document', 'Document', (['text', 'filename'], {}), '(text, filename)\n', (3454, 3470), False, 
'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((1945, 1985), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'docpath'}), '(input_dir=docpath)\n', (1966, 1985), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2355, 2369), 'pathlib.Path', 'Path', (['docs_dir'], {}), '(docs_dir)\n', (2359, 2369), False, 'from pathlib import Path\n'), ((1324, 1345), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1334, 1345), False, 'import os\n')] |
import dataclasses
import logging
from dataclasses import dataclass
from typing import Optional
from llama_index.bridge.langchain import BaseLanguageModel
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata = dataclasses.replace(llm_metadata, context_window=context_window)
if num_output is not None:
llm_metadata = dataclasses.replace(llm_metadata, num_output=num_output)
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or service_context.embed_model
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
| [
"llama_index.langchain_helpers.chain_wrapper.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.embeddings.openai.OpenAIEmbedding",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults"
] | [((714, 741), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (731, 741), False, 'import logging\n'), ((972, 1094), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1002, 1094), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1567, 1624), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1597, 1624), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((1380, 1444), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'context_window': 'context_window'}), '(llm_metadata, context_window=context_window)\n', (1399, 1444), False, 'import dataclasses\n'), ((1499, 1555), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'num_output': 'num_output'}), '(llm_metadata, num_output=num_output)\n', (1518, 1555), False, 'import dataclasses\n'), ((4767, 4786), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4782, 4786), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((4963, 4984), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (4975, 4984), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5026, 5040), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (5038, 5040), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5195, 5212), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (5210, 5212), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((5723, 5736), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (5734, 5736), False, 'from llama_index.logger import LlamaLogger\n'), ((7442, 7463), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (7454, 7463), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n')] |
import llama_index.core
llama_index.core.set_global_handler("simple")
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.core import VectorStoreIndex
#Loading
documents = SimpleDirectoryReader("dataset/txt").load_data()
print(documents[0])
#parser = SimpleFileNodeParser()
#nodes = parser.get_nodes_from_documents(documents)
#print(nodes[0])
#Index
index = VectorStoreIndex.from_documents(documents)
print(index)
#Querying
query_engine = index.as_query_engine()
print(query_engine)
response = query_engine.query("What is AIGC?")
print(response)
#Agent
from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform
from llama_index.core.query_engine import MultiStepQueryEngine
from llama_index.core.query_engine import SubQuestionQueryEngine
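# The imports above target multi-step / sub-question query decomposition; the template below
# mirrors LlamaIndex's default step-decompose prompt used by StepDecomposeQueryTransform.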
DEFAULT_STEP_DECOMPOSE_QUERY_TRANSFORM_TMPL = (
"The original question is as follows: {query_str}\n"
"We have an opportunity to answer some, or all of the question from a "
"knowledge source. "
"Context information for the knowledge source is provided below, as "
"well as previous reasoning steps.\n"
"Given the context and previous reasoning, return a question that can "
"be answered from "
"the context. This question can be the same as the original question, "
"or this question can represent a subcomponent of the overall question."
"It should not be irrelevant to the original question.\n"
"If we cannot extract more information from the context, provide 'None' "
"as the answer. "
"Some examples are given below: "
"\n\n"
"Question: How many Grand Slam titles does the winner of the 2020 Australian "
"Open have?\n"
"Knowledge source context: Provides names of the winners of the 2020 "
"Australian Open\n"
"Previous reasoning: None\n"
"Next question: Who was the winner of the 2020 Australian Open? "
"\n\n"
"Question: Who was the winner of the 2020 Australian Open?\n"
"Knowledge source context: Provides names of the winners of the 2020 "
"Australian Open\n"
"Previous reasoning: None.\n"
"New question: Who was the winner of the 2020 Australian Open? "
"\n\n"
"Question: How many Grand Slam titles does the winner of the 2020 Australian "
"Open have?\n"
"Knowledge source context: Provides information about the winners of the 2020 "
"Australian Open\n"
"Previous reasoning:\n"
"- Who was the winner of the 2020 Australian Open? \n"
"- The winner of the 2020 Australian Open was Novak Djokovic.\n"
"New question: None"
"\n\n"
"Question: How many Grand Slam titles does the winner of the 2020 Australian "
"Open have?\n"
"Knowledge source context: Provides information about the winners of the 2020 "
"Australian Open - includes biographical information for each winner\n"
"Previous reasoning:\n"
"- Who was the winner of the 2020 Australian Open? \n"
"- The winner of the 2020 Australian Open was Novak Djokovic.\n"
"New question: How many Grand Slam titles does Novak Djokovic have? "
"\n\n"
"Question: {query_str}\n"
"Knowledge source context: {context_str}\n"
"Previous reasoning: {prev_reasoning}\n"
"New question: "
)
print(DEFAULT_STEP_DECOMPOSE_QUERY_TRANSFORM_TMPL) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((445, 487), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (476, 487), False, 'from llama_index.core import VectorStoreIndex\n'), ((257, 293), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""dataset/txt"""'], {}), "('dataset/txt')\n", (278, 293), False, 'from llama_index.core import SimpleDirectoryReader\n')] |
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader("../data/paul_graham")
docs = reader.load_data()
import os
from llama_index.core import (
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
if not os.path.exists("storage"):
index = VectorStoreIndex.from_documents(docs)
index.set_index_id("vector_index")
index.storage_context.persist("./storage")
else:
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context, index_id="vector_index")
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core import PromptTemplate
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True)
output = p.run(movie_name="The Departed")
print(str(output))
from typing import List
from pydantic import BaseModel, Field
from llama_index.core.output_parsers import PydanticOutputParser
class Movie(BaseModel):
"""Object representing a single movie."""
name: str = Field(..., description="Name of the movie.")
year: int = Field(..., description="Year of the movie.")
class Movies(BaseModel):
"""Object representing a list of movies."""
movies: List[Movie] = Field(..., description="List of movies.")
llm = OpenAI(model="gpt-3.5-turbo")
output_parser = PydanticOutputParser(Movies)
json_prompt_str = """\
Please generate related movies to {movie_name}. Output with the following JSON format:
"""
json_prompt_str = output_parser.format(json_prompt_str)
json_prompt_tmpl = PromptTemplate(json_prompt_str)
p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)
output = p.run(movie_name="Toy Story")
output
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
prompt_str2 = """\
Here's some text:
{text}
Can you rewrite this with a summary of each movie?
"""
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(
chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True
)
output = p.run(movie_name="The Dark Knight")
for o in output:
print(o.delta, end="")
p = QueryPipeline(
chain=[
json_prompt_tmpl,
llm.as_query_component(streaming=True),
output_parser,
],
verbose=True,
)
output = p.run(movie_name="Toy Story")
print(output)
from llama_index.postprocessor.cohere_rerank import CohereRerank
prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_str2 = (
"Please write a passage to answer the question\n"
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{query_str}\n"
"\n"
"\n"
'Passage:"""\n'
)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=5)
p = QueryPipeline(
chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True
)
nodes = p.run(topic="college")
len(nodes)
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}"
prompt_tmpl = PromptTemplate(prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
retriever = index.as_retriever(similarity_top_k=3)
reranker = CohereRerank()
summarizer = TreeSummarize(llm=llm)
p = QueryPipeline(verbose=True)
p.add_modules(
{
"llm": llm,
"prompt_tmpl": prompt_tmpl,
"retriever": retriever,
"summarizer": summarizer,
"reranker": reranker,
}
)
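# Wire the DAG: the prompt feeds the llm, whose generated question drives the retriever and is also
# passed as query_str to the reranker and summarizer; retrieved nodes go through the reranker before the summarizer.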
p.add_link("prompt_tmpl", "llm")
p.add_link("llm", "retriever")
p.add_link("retriever", "reranker", dest_key="nodes")
p.add_link("llm", "reranker", dest_key="query_str")
p.add_link("reranker", "summarizer", dest_key="nodes")
p.add_link("llm", "summarizer", dest_key="query_str")
print(summarizer.as_query_component().input_keys)
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(p.dag)
net.show("rag_dag.html")
response = p.run(topic="YC")
print(str(response))
response = await p.arun(topic="YC")
print(str(response))
from llama_index.postprocessor.cohere_rerank import CohereRerank
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.query_pipeline import InputComponent
retriever = index.as_retriever(similarity_top_k=5)
summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo"))
reranker = CohereRerank()
p = QueryPipeline(verbose=True)
p.add_modules(
{
"input": InputComponent(),
"retriever": retriever,
"summarizer": summarizer,
}
)
p.add_link("input", "retriever")
p.add_link("input", "summarizer", dest_key="query_str")
p.add_link("retriever", "summarizer", dest_key="nodes")
output = p.run(input="what did the author do in YC")
print(str(output))
from llama_index.core.query_pipeline import (
CustomQueryComponent,
InputKeys,
OutputKeys,
)
from typing import Dict, Any
from llama_index.core.llms.llm import LLM
from pydantic import Field
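# Custom pipeline component: declares its input/output keys and wraps a small prompt -> LLM chain internally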
class RelatedMovieComponent(CustomQueryComponent):
"""Related movie component."""
llm: LLM = Field(..., description="OpenAI LLM")
def _validate_component_inputs(
self, input: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
return input
@property
def _input_keys(self) -> set:
"""Input keys dict."""
return {"movie"}
@property
def _output_keys(self) -> set:
return {"output"}
def _run_component(self, **kwargs) -> Dict[str, Any]:
"""Run the component."""
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
        p = QueryPipeline(chain=[prompt_tmpl, self.llm])
return {"output": p.run(movie_name=kwargs["movie"])}
llm = OpenAI(model="gpt-3.5-turbo")
component = RelatedMovieComponent(llm=llm)
prompt_str = """\
Here's some text:
{text}
Can you rewrite this in the voice of Shakespeare?
"""
prompt_tmpl = PromptTemplate(prompt_str)
p = QueryPipeline(chain=[component, prompt_tmpl, llm], verbose=True)
output = p.run(movie="Love Actually")
print(str(output)) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.openai.OpenAI",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.query_pipeline.InputComponent",
"llama_index.core.PromptTemplate",
"llama_index.core.query_pipeline.QueryPipeline",
"llama_index.core.output_parsers.PydanticOutputParser",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.SimpleDirectoryReader",
"llama_index.postprocessor.cohere_rerank.CohereRerank",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((259, 274), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (272, 274), True, 'import phoenix as px\n'), ((510, 539), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (516, 539), False, 'from llama_index.llms.openai import OpenAI\n'), ((563, 610), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (578, 610), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((674, 718), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham"""'], {}), "('../data/paul_graham')\n", (695, 718), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((1374, 1400), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (1388, 1400), False, 'from llama_index.core import PromptTemplate\n'), ((1407, 1436), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1413, 1436), False, 'from llama_index.llms.openai import OpenAI\n'), ((1442, 1495), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm]', 'verbose': '(True)'}), '(chain=[prompt_tmpl, llm], verbose=True)\n', (1455, 1495), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((2038, 2067), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (2044, 2067), False, 'from llama_index.llms.openai import OpenAI\n'), ((2084, 2112), 'llama_index.core.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['Movies'], {}), '(Movies)\n', (2104, 2112), False, 'from llama_index.core.output_parsers import PydanticOutputParser\n'), ((2305, 2336), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['json_prompt_str'], {}), '(json_prompt_str)\n', (2319, 2336), False, 'from llama_index.core import PromptTemplate\n'), ((2342, 2415), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[json_prompt_tmpl, llm, output_parser]', 'verbose': '(True)'}), '(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)\n', (2355, 2415), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((2544, 2570), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (2558, 2570), False, 'from llama_index.core import PromptTemplate\n'), ((2687, 2714), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str2'], {}), '(prompt_str2)\n', (2701, 2714), False, 'from llama_index.core import PromptTemplate\n'), ((2721, 2750), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (2727, 2750), False, 'from llama_index.llms.openai import OpenAI\n'), ((2803, 2879), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm_c, prompt_tmpl2, llm_c]', 'verbose': '(True)'}), '(chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True)\n', (2816, 2879), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((3388, 3415), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str1'], {}), '(prompt_str1)\n', (3402, 3415), False, 'from llama_index.core import PromptTemplate\n'), ((3635, 3662), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str2'], {}), '(prompt_str2)\n', (3649, 3662), False, 'from 
llama_index.core import PromptTemplate\n'), ((3670, 3699), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (3676, 3699), False, 'from llama_index.llms.openai import OpenAI\n'), ((3755, 3843), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever]', 'verbose': '(True)'}), '(chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever],\n verbose=True)\n', (3768, 3843), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((4145, 4171), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (4159, 4171), False, 'from llama_index.core import PromptTemplate\n'), ((4178, 4207), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (4184, 4207), False, 'from llama_index.llms.openai import OpenAI\n'), ((4270, 4284), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {}), '()\n', (4282, 4284), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((4298, 4320), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'llm': 'llm'}), '(llm=llm)\n', (4311, 4320), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((4327, 4354), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4340, 4354), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((4913, 4975), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (4920, 4975), False, 'from pyvis.network import Network\n'), ((5451, 5465), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {}), '()\n', (5463, 5465), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((5472, 5499), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'verbose': '(True)'}), '(verbose=True)\n', (5485, 5499), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((6893, 6922), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (6899, 6922), False, 'from llama_index.llms.openai import OpenAI\n'), ((7080, 7106), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (7094, 7106), False, 'from llama_index.core import PromptTemplate\n'), ((7112, 7176), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[component, prompt_tmpl, llm]', 'verbose': '(True)'}), '(chain=[component, prompt_tmpl, llm], verbose=True)\n', (7125, 7176), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((871, 896), 'os.path.exists', 'os.path.exists', (['"""storage"""'], {}), "('storage')\n", (885, 896), False, 'import os\n'), ((910, 947), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (941, 947), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1062, 1113), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""storage"""'}), "(persist_dir='storage')\n", (1090, 1113), False, 'from llama_index.core import StorageContext, VectorStoreIndex, 
load_index_from_storage\n'), ((1126, 1191), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': '"""vector_index"""'}), "(storage_context, index_id='vector_index')\n", (1149, 1191), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1780, 1824), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the movie."""'}), "(..., description='Name of the movie.')\n", (1785, 1824), False, 'from pydantic import Field\n'), ((1841, 1885), 'pydantic.Field', 'Field', (['...'], {'description': '"""Year of the movie."""'}), "(..., description='Year of the movie.')\n", (1846, 1885), False, 'from pydantic import Field\n'), ((1988, 2029), 'pydantic.Field', 'Field', (['...'], {'description': '"""List of movies."""'}), "(..., description='List of movies.')\n", (1993, 2029), False, 'from pydantic import Field\n'), ((6161, 6197), 'pydantic.Field', 'Field', (['...'], {'description': '"""OpenAI LLM"""'}), "(..., description='OpenAI LLM')\n", (6166, 6197), False, 'from pydantic import Field\n'), ((5409, 5438), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (5415, 5438), False, 'from llama_index.llms.openai import OpenAI\n'), ((5538, 5554), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (5552, 5554), False, 'from llama_index.core.query_pipeline import InputComponent\n'), ((6744, 6770), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (6758, 6770), False, 'from llama_index.core import PromptTemplate\n'), ((6783, 6822), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm]'}), '(chain=[prompt_tmpl, llm])\n', (6796, 6822), False, 'from llama_index.core.query_pipeline import QueryPipeline\n')] |
import logging
from dataclasses import dataclass
from typing import Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.sentence_window import SentenceWindowNodeParser
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.prompts.base import BasePromptTemplate
from llama_index.text_splitter.types import TextSplitter
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
node_parser: dict
text_splitter: Optional[dict]
metadata_extractor: Optional[dict]
extractors: Optional[list]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = llm_predictor or LLMPredictor(llm=llm)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
node_parser_dict = self.node_parser.to_dict()
metadata_extractor_dict = None
extractor_dicts = None
text_splitter_dict = None
if isinstance(self.node_parser, SimpleNodeParser) and isinstance(
self.node_parser.text_splitter, TextSplitter
):
text_splitter_dict = self.node_parser.text_splitter.to_dict()
if isinstance(self.node_parser, (SimpleNodeParser, SentenceWindowNodeParser)):
if self.node_parser.metadata_extractor:
metadata_extractor_dict = self.node_parser.metadata_extractor.to_dict()
extractor_dicts = []
for extractor in self.node_parser.metadata_extractor.extractors:
extractor_dicts.append(extractor.to_dict())
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
node_parser=node_parser_dict,
text_splitter=text_splitter_dict,
metadata_extractor=metadata_extractor_dict,
extractors=extractor_dicts,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.llm_predictor.loading import load_predictor
from llama_index.llms.loading import load_llm
from llama_index.node_parser.extractors.loading import load_extractor
from llama_index.node_parser.loading import load_parser
from llama_index.text_splitter.loading import load_text_splitter
service_context_data = ServiceContextData.parse_obj(data)
llm = load_llm(service_context_data.llm)
llm_predictor = load_predictor(service_context_data.llm_predictor, llm=llm)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
extractors = None
if service_context_data.extractors:
extractors = []
for extractor_dict in service_context_data.extractors:
extractors.append(load_extractor(extractor_dict, llm=llm))
metadata_extractor = None
if service_context_data.metadata_extractor:
metadata_extractor = load_extractor(
service_context_data.metadata_extractor,
extractors=extractors,
)
text_splitter = None
if service_context_data.text_splitter:
text_splitter = load_text_splitter(service_context_data.text_splitter)
node_parser = load_parser(
service_context_data.node_parser,
text_splitter=text_splitter,
metadata_extractor=metadata_extractor,
)
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
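

# A minimal usage sketch (illustrative only; assumes the legacy `llama_index.llms.OpenAI`
# wrapper is installed and an OpenAI API key is configured). It shows how the defaults
# above are typically overridden and then installed globally.
if __name__ == "__main__":
    from llama_index.llms import OpenAI

    _service_context = ServiceContext.from_defaults(
        llm=OpenAI(model="gpt-3.5-turbo"),  # resolved through resolve_llm()
        chunk_size=512,  # forwarded to the default SimpleNodeParser
        context_window=4096,  # forwarded to the default PromptHelper
    )
    set_global_service_context(_service_context)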
| [
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.text_splitter.loading.load_text_splitter",
"llama_index.node_parser.extractors.loading.load_extractor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.embeddings.utils.resolve_embed_model",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.llms.loading.load_llm",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.llm_predictor.loading.load_predictor"
] | [((965, 992), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (982, 992), False, 'import logging\n'), ((1223, 1345), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1253, 1345), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1748, 1805), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1778, 1805), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6295, 6327), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (6314, 6327), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((9351, 9383), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (9370, 9383), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((12743, 12777), 'llama_index.llms.loading.load_llm', 'load_llm', (['service_context_data.llm'], {}), '(service_context_data.llm)\n', (12751, 12777), False, 'from llama_index.llms.loading import load_llm\n'), ((12802, 12861), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {'llm': 'llm'}), '(service_context_data.llm_predictor, llm=llm)\n', (12816, 12861), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((12885, 12935), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (12901, 12935), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((12961, 13019), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (12983, 13019), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((13690, 13807), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['service_context_data.node_parser'], {'text_splitter': 'text_splitter', 'metadata_extractor': 'metadata_extractor'}), '(service_context_data.node_parser, text_splitter=text_splitter,\n metadata_extractor=metadata_extractor)\n', (13701, 13807), False, 'from llama_index.node_parser.loading import load_parser\n'), ((5628, 5647), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5643, 5647), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((5815, 5831), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (5826, 5831), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((5873, 5894), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (5885, 5894), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6828, 6841), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (6839, 6841), False, 'from llama_index.logger import LlamaLogger\n'), ((8646, 8662), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (8657, 8662), False, 'from 
llama_index.llms.utils import LLMType, resolve_llm\n'), ((8691, 8712), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8703, 8712), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((13381, 13459), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['service_context_data.metadata_extractor'], {'extractors': 'extractors'}), '(service_context_data.metadata_extractor, extractors=extractors)\n', (13395, 13459), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n'), ((13612, 13666), 'llama_index.text_splitter.loading.load_text_splitter', 'load_text_splitter', (['service_context_data.text_splitter'], {}), '(service_context_data.text_splitter)\n', (13630, 13666), False, 'from llama_index.text_splitter.loading import load_text_splitter\n'), ((13220, 13259), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['extractor_dict'], {'llm': 'llm'}), '(extractor_dict, llm=llm)\n', (13234, 13259), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n')] |
import llama_index
from pathlib import Path
from typing import Annotated, List
from fastapi.responses import StreamingResponse
from fastapi import (
File,
Form,
UploadFile,
APIRouter,
Depends,
HTTPException,
Request,
status
)
from llama_index import StorageContext, VectorStoreIndex, SummaryIndex
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.llms.types import MessageRole, ChatMessage
from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever
from llama_index.tools import RetrieverTool
from llama_index.chat_engine import ContextChatEngine
from llama_index.memory import ChatMemoryBuffer
from llama_index.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator
)
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from app.utils.json_to import json_to_model
from app.utils.index import get_index
from app.utils.auth import decode_access_token
from app.utils.fs import get_s3_fs, get_s3_boto_client
from app.db.pg_vector import get_vector_store_singleton
from app.db.crud import get_documents, create_documents, delete_document, is_user_existed
from app.pydantic_models.chat import ChatData
from app.orm_models import Document
from app.core.ingest import ingest_user_documents
from app.prompts.system import LLM_SYSTEM_MESSAGE
from app.prompts.selector import MULTI_SELECT_PROMPT_TEMPLATE, SINGLE_SELECTOR_PROMPT_TEMPLATE
chat_router = r = APIRouter()
@r.post("")
async def chat(
request: Request,
# Note: To support clients sending a JSON object using content-type "text/plain",
# we need to use Depends(json_to_model(_ChatData)) here
data: Annotated[ChatData, Depends(json_to_model(ChatData))],
index: Annotated[dict, Depends(get_index)],
token_payload: Annotated[dict, Depends(decode_access_token)]
):
# logger = logging.getLogger("uvicorn")
user_id = token_payload["user_id"]
# Only need to retrieve indices from the current user.
filters = MetadataFilters(
filters=[
MetadataFilter(
key="user_id",
operator=FilterOperator.EQ,
value=user_id),
]
)
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
# system_message = (
# "You are a professional job candidate who will answer the recruiter question using the context information."
# "If the question is out of scope, kindly apologize and refuse to answer."
# )
# Callbacks for observability.
# TODO: this is not working.
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager = CallbackManager([llama_debug])
vs_retriever = VectorIndexRetriever(
index=index["vector"],
similarity_top_k=3,
filters=filters,
)
summary_retriever = SummaryIndexEmbeddingRetriever(
index=index["summary"],
similarity_top_k=3,
)
vs_tool = RetrieverTool.from_defaults(
retriever=vs_retriever,
description="Useful for retrieving specific context from uploaded documents."
)
summary_tool = RetrieverTool.from_defaults(
retriever=summary_retriever,
description="Useful to retrieve all context from uploaded documents and summary tasks. Don't use if the question only requires more specific context."
)
# TODO: correct the prompt used by LLM to use router retriever.
retriever = RouterRetriever(
selector=LLMSingleSelector.from_defaults(
# prompt_template_str=SINGLE_SELECTOR_PROMPT_TEMPLATE
),
retriever_tools=[vs_tool, summary_tool]
)
chat_engine = ContextChatEngine(
retriever=vs_retriever,
llm=llama_index.global_service_context.llm,
memory=ChatMemoryBuffer.from_defaults(token_limit=4096),
prefix_messages=[ChatMessage(
role="system", content=LLM_SYSTEM_MESSAGE)],
callback_manager=callback_manager,
)
print(chat_engine._retriever.get_prompts())
response = chat_engine.stream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
for token in response.response_gen:
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain")
@r.post("/upload/single")
async def upload(
description: Annotated[str, Form()],
question: Annotated[str, Form()],
file: Annotated[UploadFile, File()],
token_payload: Annotated[dict, Depends(decode_access_token)]
) -> Document:
vector_store = await get_vector_store_singleton()
user_id = token_payload["user_id"]
user_s3_folder = Path(f"talking-resume/{user_id}")
nodes = []
# Have to use boto because I don't know how to write temporary file to s3 using f3fs.
s3 = get_s3_boto_client()
doc = Document(
s3_path=f"{user_id}/{file.filename}",
is_active=True,
description=description,
question=question,
user_id=user_id,
)
# Create new record in db.
doc_in_db = create_documents([doc])[0]
doc_uuid = str(doc_in_db.id)
# Save the document to S3.
s3.upload_fileobj(
file.file,
"talking-resume",
doc.s3_path,
)
nodes = ingest_user_documents(
doc_uuid,
f"talking-resume/{doc.s3_path}",
doc.description,
doc.question,
doc.user_id
)
# Save documents indices and embeddings.
s3 = get_s3_fs()
persist_dir = None
if await is_user_existed(user_id):
persist_dir = f"talking-resume/{user_id}"
storage_context = StorageContext.from_defaults(
vector_store=vector_store,
persist_dir=persist_dir,
fs=s3)
# Vector store index.
vector_index = VectorStoreIndex.from_documents(
documents=nodes, storage_context=storage_context, show_progress=True)
vector_index.set_index_id(f'vector_{user_id}')
vector_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3)
# Summary index.
summary_index = SummaryIndex.from_documents(
documents=nodes, storage_context=storage_context, show_progress=True)
summary_index.set_index_id(f'summary_{user_id}')
summary_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3)
return doc_in_db
@r.get("/upload")
def get_upload(
user_id: str,
token_payload: Annotated[dict, Depends(decode_access_token)]
) -> List[Document]:
documents = get_documents(user_id)
for document in documents:
s3 = get_s3_boto_client()
s3_url = s3.generate_presigned_url(
"get_object",
Params={
"Bucket": "talking-resume",
"Key": document.s3_path,
"ResponseContentDisposition": "inline",
"ResponseContentType": "application/pdf"})
document.s3_url = s3_url
return documents
@r.delete("/upload")
async def delete_upload(
document_id: str,
user_id: str,
) -> None:
await delete_document(document_id, user_id)
@r.post("/upload/multiple")
async def upload_multiple(
descriptions: Annotated[List[str], Form()],
questions: Annotated[List[str], Form()],
files: Annotated[List[UploadFile], File()],
token_payload: Annotated[dict, Depends(decode_access_token)]
) -> List[Document]:
vector_store = await get_vector_store_singleton()
user_id = token_payload["user_id"]
user_s3_folder = Path(f"talking-resume/{user_id}")
# TODO: smartly remove or inactivate documents instead of full deletion.
# if await is_user_existed(user_id):
# await delete_all_documents_from_user(user_id)
# Have to use boto because I don't know how to write temporary file to s3 using f3fs.
s3 = get_s3_boto_client()
nodes = []
docs = []
for user_document, description, question in zip(files, descriptions, questions):
doc = Document(
s3_path=f"{user_id}/{user_document.filename}",
is_active=True,
description=description,
question=question,
user_id=user_id,
)
# Save the document to S3.
s3.upload_fileobj(
user_document.file,
"talking-resume",
doc.s3_path,
)
nodes.extend(ingest_user_documents(
f"talking-resume/{doc.s3_path}", doc.description, doc.question, doc.user_id))
docs.append(doc)
# Save documents indices and embeddings.
s3 = get_s3_fs()
storage_context = StorageContext.from_defaults(
vector_store=vector_store, fs=s3)
# Vector store index.
vector_index = VectorStoreIndex.from_documents(
documents=nodes, storage_context=storage_context)
vector_index.set_index_id(user_id)
vector_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3)
# Create new record in db.
docs = create_documents(docs)
return docs
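

# A hedged client-side sketch (illustrative only, not used by the router above): how the
# streaming "text/plain" response of the chat endpoint can be consumed. The `chat_url`,
# bearer token, and payload shape mirroring `ChatData.messages` are assumptions here.
def example_stream_chat(chat_url: str, token: str, messages: list) -> None:
    import httpx  # assumed to be available in the client environment

    with httpx.stream(
        "POST",
        chat_url,
        json={"messages": messages},
        headers={"Authorization": f"Bearer {token}"},
        timeout=None,
    ) as response:
        for chunk in response.iter_text():
            print(chunk, end="", flush=True)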
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.memory.ChatMemoryBuffer.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.SummaryIndex.from_documents",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.StorageContext.from_defaults",
"llama_index.selectors.llm_selectors.LLMSingleSelector.from_defaults",
"llama_index.tools.RetrieverTool.from_defaults",
"llama_index.vector_stores.MetadataFilter",
"llama_index.retrievers.SummaryIndexEmbeddingRetriever",
"llama_index.callbacks.CallbackManager",
"llama_index.llms.types.ChatMessage"
] | [((1534, 1545), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1543, 1545), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((3266, 3308), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (3283, 3308), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((3332, 3362), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (3347, 3362), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((3383, 3468), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': "index['vector']", 'similarity_top_k': '(3)', 'filters': 'filters'}), "(index=index['vector'], similarity_top_k=3, filters=filters\n )\n", (3403, 3468), False, 'from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever\n'), ((3519, 3593), 'llama_index.retrievers.SummaryIndexEmbeddingRetriever', 'SummaryIndexEmbeddingRetriever', ([], {'index': "index['summary']", 'similarity_top_k': '(3)'}), "(index=index['summary'], similarity_top_k=3)\n", (3549, 3593), False, 'from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever\n'), ((3632, 3767), 'llama_index.tools.RetrieverTool.from_defaults', 'RetrieverTool.from_defaults', ([], {'retriever': 'vs_retriever', 'description': '"""Useful for retrieving specific context from uploaded documents."""'}), "(retriever=vs_retriever, description=\n 'Useful for retrieving specific context from uploaded documents.')\n", (3659, 3767), False, 'from llama_index.tools import RetrieverTool\n'), ((3804, 4022), 'llama_index.tools.RetrieverTool.from_defaults', 'RetrieverTool.from_defaults', ([], {'retriever': 'summary_retriever', 'description': '"""Useful to retrieve all context from uploaded documents and summary tasks. Don\'t use if the question only requires more specific context."""'}), '(retriever=summary_retriever, description=\n "Useful to retrieve all context from uploaded documents and summary tasks. 
Don\'t use if the question only requires more specific context."\n )\n', (3831, 4022), False, 'from llama_index.tools import RetrieverTool\n'), ((5459, 5492), 'pathlib.Path', 'Path', (['f"""talking-resume/{user_id}"""'], {}), "(f'talking-resume/{user_id}')\n", (5463, 5492), False, 'from pathlib import Path\n'), ((5608, 5628), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (5626, 5628), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((5639, 5767), 'app.orm_models.Document', 'Document', ([], {'s3_path': 'f"""{user_id}/{file.filename}"""', 'is_active': '(True)', 'description': 'description', 'question': 'question', 'user_id': 'user_id'}), "(s3_path=f'{user_id}/{file.filename}', is_active=True, description=\n description, question=question, user_id=user_id)\n", (5647, 5767), False, 'from app.orm_models import Document\n'), ((6056, 6169), 'app.core.ingest.ingest_user_documents', 'ingest_user_documents', (['doc_uuid', 'f"""talking-resume/{doc.s3_path}"""', 'doc.description', 'doc.question', 'doc.user_id'], {}), "(doc_uuid, f'talking-resume/{doc.s3_path}', doc.\n description, doc.question, doc.user_id)\n", (6077, 6169), False, 'from app.core.ingest import ingest_user_documents\n'), ((6266, 6277), 'app.utils.fs.get_s3_fs', 'get_s3_fs', ([], {}), '()\n', (6275, 6277), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((6413, 6505), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': 'persist_dir', 'fs': 's3'}), '(vector_store=vector_store, persist_dir=\n persist_dir, fs=s3)\n', (6441, 6505), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((6571, 6677), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'nodes', 'storage_context': 'storage_context', 'show_progress': '(True)'}), '(documents=nodes, storage_context=\n storage_context, show_progress=True)\n', (6602, 6677), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((6851, 6953), 'llama_index.SummaryIndex.from_documents', 'SummaryIndex.from_documents', ([], {'documents': 'nodes', 'storage_context': 'storage_context', 'show_progress': '(True)'}), '(documents=nodes, storage_context=\n storage_context, show_progress=True)\n', (6878, 6953), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((7266, 7288), 'app.db.crud.get_documents', 'get_documents', (['user_id'], {}), '(user_id)\n', (7279, 7288), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((8235, 8268), 'pathlib.Path', 'Path', (['f"""talking-resume/{user_id}"""'], {}), "(f'talking-resume/{user_id}')\n", (8239, 8268), False, 'from pathlib import Path\n'), ((8544, 8564), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (8562, 8564), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((9271, 9282), 'app.utils.fs.get_s3_fs', 'get_s3_fs', ([], {}), '()\n', (9280, 9282), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((9305, 9367), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'fs': 's3'}), '(vector_store=vector_store, fs=s3)\n', (9333, 9367), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((9423, 9509), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', 
([], {'documents': 'nodes', 'storage_context': 'storage_context'}), '(documents=nodes, storage_context=\n storage_context)\n', (9454, 9509), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((9672, 9694), 'app.db.crud.create_documents', 'create_documents', (['docs'], {}), '(docs)\n', (9688, 9694), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((2361, 2451), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (2374, 2451), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2579, 2679), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (2592, 2679), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2802, 2845), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (2813, 2845), False, 'from llama_index.llms.types import MessageRole, ChatMessage\n'), ((5370, 5398), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (5396, 5398), False, 'from app.db.pg_vector import get_vector_store_singleton\n'), ((5857, 5880), 'app.db.crud.create_documents', 'create_documents', (['[doc]'], {}), '([doc])\n', (5873, 5880), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((6314, 6338), 'app.db.crud.is_user_existed', 'is_user_existed', (['user_id'], {}), '(user_id)\n', (6329, 6338), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((7333, 7353), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (7351, 7353), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((7808, 7845), 'app.db.crud.delete_document', 'delete_document', (['document_id', 'user_id'], {}), '(document_id, user_id)\n', (7823, 7845), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((8146, 8174), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (8172, 8174), False, 'from app.db.pg_vector import get_vector_store_singleton\n'), ((8693, 8829), 'app.orm_models.Document', 'Document', ([], {'s3_path': 'f"""{user_id}/{user_document.filename}"""', 'is_active': '(True)', 'description': 'description', 'question': 'question', 'user_id': 'user_id'}), "(s3_path=f'{user_id}/{user_document.filename}', is_active=True,\n description=description, question=question, user_id=user_id)\n", (8701, 8829), False, 'from app.orm_models import Document\n'), ((4154, 4187), 'llama_index.selectors.llm_selectors.LLMSingleSelector.from_defaults', 'LLMSingleSelector.from_defaults', ([], {}), '()\n', (4185, 4187), False, 'from llama_index.selectors.llm_selectors import LLMSingleSelector\n'), ((4455, 4503), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4096)'}), '(token_limit=4096)\n', (4485, 4503), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((9078, 9181), 
'app.core.ingest.ingest_user_documents', 'ingest_user_documents', (['f"""talking-resume/{doc.s3_path}"""', 'doc.description', 'doc.question', 'doc.user_id'], {}), "(f'talking-resume/{doc.s3_path}', doc.description, doc\n .question, doc.user_id)\n", (9099, 9181), False, 'from app.core.ingest import ingest_user_documents\n'), ((1836, 1854), 'fastapi.Depends', 'Depends', (['get_index'], {}), '(get_index)\n', (1843, 1854), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((1892, 1920), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (1899, 1920), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2128, 2200), 'llama_index.vector_stores.MetadataFilter', 'MetadataFilter', ([], {'key': '"""user_id"""', 'operator': 'FilterOperator.EQ', 'value': 'user_id'}), "(key='user_id', operator=FilterOperator.EQ, value=user_id)\n", (2142, 2200), False, 'from llama_index.vector_stores import MetadataFilter, MetadataFilters, FilterOperator\n'), ((4530, 4584), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'LLM_SYSTEM_MESSAGE'}), "(role='system', content=LLM_SYSTEM_MESSAGE)\n", (4541, 4584), False, 'from llama_index.llms.types import MessageRole, ChatMessage\n'), ((5177, 5183), 'fastapi.Form', 'Form', ([], {}), '()\n', (5181, 5183), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5215, 5221), 'fastapi.Form', 'Form', ([], {}), '()\n', (5219, 5221), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5256, 5262), 'fastapi.File', 'File', ([], {}), '()\n', (5260, 5262), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5300, 5328), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (5307, 5328), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7199, 7227), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (7206, 7227), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7933, 7939), 'fastapi.Form', 'Form', ([], {}), '()\n', (7937, 7939), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7978, 7984), 'fastapi.Form', 'Form', ([], {}), '()\n', (7982, 7984), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((8026, 8032), 'fastapi.File', 'File', ([], {}), '()\n', (8030, 8032), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((8070, 8098), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (8077, 8098), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((1782, 1805), 'app.utils.json_to.json_to_model', 'json_to_model', (['ChatData'], {}), '(ChatData)\n', (1795, 1805), False, 'from app.utils.json_to import json_to_model\n')] |
import utils
import os
import requests
import llama_index
import torch
import llama_cpp
from llama_index import SimpleDirectoryReader
from llama_index import Document
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index import LLMPredictor
# Params
llama = True
### Get data
dirpath = 'related_works/Cloud_VM/'
filename = dirpath + 'ey.pdf'
url = 'https://assets.ey.com/content/dam/ey-sites/ey-com/nl_nl/topics/jaarverslag/downloads-pdfs/2022-2023/ey-nl-financial-statements-2023-en.pdf'
if not os.path.exists(filename):
print(f"Downloading {filename} from {url}...")
response = requests.get(url)
with open(dirpath + 'ey.pdf', 'wb') as f:
f.write(response.content)
documents = SimpleDirectoryReader(
input_files=[filename]
).load_data()
### Print data
print(type(documents), "\n")
print(len(documents), "\n")
print(type(documents[0]))
print(documents[0])
### Create doc object
document = Document(text="\n\n".join([doc.text for doc in documents]))
### load model
model_name_or_path = "TheBloke/Llama-2-13B-chat-GGML"
model_basename = "llama-2-13b-chat.ggmlv3.q5_1.bin" # the model is in bin format
from huggingface_hub import hf_hub_download
model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
if llama:
# GPU
from llama_cpp import Llama
llm = None
llm = Llama(
model_path=model_path,
n_threads=2, # CPU cores
n_batch=512, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
n_gpu_layers=43, # Change this value based on your model and your GPU VRAM pool.
n_ctx=4096, # Context window
)
else:
from transformers import LlamaTokenizer, LlamaForCausalLM
tokenizer = LlamaTokenizer.from_pretrained('ChanceFocus/finma-7b-full')
llm = LlamaForCausalLM.from_pretrained('ChanceFocus/finma-7b-full', device_map='auto')
##### The replicate endpoint
from llama_index.llms import Replicate
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms.llama_utils import (
messages_to_prompt,
completion_to_prompt,
)
LLAMA_13B_V2_CHAT = "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
# inject custom system prompt into llama-2
def custom_completion_to_prompt(completion: str) -> str:
return completion_to_prompt(
completion,
system_prompt=(
"You are a Q&A assistant. Your goal is to answer questions as "
"accurately as possible is the instructions and context provided."
),
)
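# Note: the Replicate client constructed below replaces the locally loaded llama.cpp /
# FinMA model from the `if llama:` block above; the rest of the script only uses this
# remote endpoint as its LLM.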
llm = Replicate(
model=LLAMA_13B_V2_CHAT,
temperature=0.01,
# override max tokens since it's interpreted
# as context window instead of max tokens
context_window=4096,
# override completion representation for llama 2
completion_to_prompt=custom_completion_to_prompt,
# if using llama 2 for data agents, also override the message representation
messages_to_prompt=messages_to_prompt,
)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model="local:BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents([document],
service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query(
"What actions is Ernst & Young Global Limited taking to address climate change issues?"
)
print(str(response))
# ## Evaluation setup using TruLens
eval_questions = []
with open('eval_questions.txt', 'r') as file:
for line in file:
# Remove newline character and convert to integer
item = line.strip()
print(item)
eval_questions.append(item)
# You can try your own question:
new_question = "What is the right AI job for me?"
eval_questions.append(new_question)
print(eval_questions)
from trulens_eval import Tru
tru = Tru()
tru.reset_database()
from utils import get_prebuilt_trulens_recorder
tru_recorder = get_prebuilt_trulens_recorder(query_engine,
app_id="Direct Query Engine")
with tru_recorder as recording:
for question in eval_questions:
response = query_engine.query(question)
records, feedback = tru.get_records_and_feedback(app_ids=[])
records.head()
# launches on http://localhost:8501/
tru.run_dashboard()
# ## Advanced RAG pipeline
# ### 1. Sentence Window retrieval
from utils import build_sentence_window_index
sentence_index = build_sentence_window_index(
document,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="sentence_index"
)
from utils import get_sentence_window_query_engine
sentence_window_engine = get_sentence_window_query_engine(sentence_index)
window_response = sentence_window_engine.query(
"how do I get started on a personal project in AI?"
)
print(str(window_response))
tru.reset_database()
tru_recorder_sentence_window = get_prebuilt_trulens_recorder(
sentence_window_engine,
app_id = "Sentence Window Query Engine"
)
for question in eval_questions:
with tru_recorder_sentence_window as recording:
response = sentence_window_engine.query(question)
print(question)
print(str(response))
tru.get_leaderboard(app_ids=[])
# launches on http://localhost:8501/
tru.run_dashboard()
# ### 2. Auto-merging retrieval
from utils import build_automerging_index
automerging_index = build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index"
)
from utils import get_automerging_query_engine
automerging_query_engine = get_automerging_query_engine(
automerging_index,
)
auto_merging_response = automerging_query_engine.query(
"How do I build a portfolio of AI projects?"
)
print(str(auto_merging_response))
tru.reset_database()
tru_recorder_automerging = get_prebuilt_trulens_recorder(automerging_query_engine,
app_id="Automerging Query Engine")
for question in eval_questions:
with tru_recorder_automerging as recording:
response = automerging_query_engine.query(question)
print(question)
print(response)
tru.get_leaderboard(app_ids=[])
# launches on http://localhost:8501/
tru.run_dashboard()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Replicate",
"llama_index.llms.llama_utils.completion_to_prompt"
] | [((1239, 1307), 'huggingface_hub.hf_hub_download', 'hf_hub_download', ([], {'repo_id': 'model_name_or_path', 'filename': 'model_basename'}), '(repo_id=model_name_or_path, filename=model_basename)\n', (1254, 1307), False, 'from huggingface_hub import hf_hub_download\n'), ((2628, 2799), 'llama_index.llms.Replicate', 'Replicate', ([], {'model': 'LLAMA_13B_V2_CHAT', 'temperature': '(0.01)', 'context_window': '(4096)', 'completion_to_prompt': 'custom_completion_to_prompt', 'messages_to_prompt': 'messages_to_prompt'}), '(model=LLAMA_13B_V2_CHAT, temperature=0.01, context_window=4096,\n completion_to_prompt=custom_completion_to_prompt, messages_to_prompt=\n messages_to_prompt)\n', (2637, 2799), False, 'from llama_index.llms import Replicate\n'), ((3062, 3148), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en-v1.5"""'}), "(llm=llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5')\n", (3090, 3148), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((3158, 3234), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'service_context'}), '([document], service_context=service_context)\n', (3189, 3234), False, 'from llama_index import VectorStoreIndex\n'), ((3909, 3914), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (3912, 3914), False, 'from trulens_eval import Tru\n'), ((4002, 4075), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['query_engine'], {'app_id': '"""Direct Query Engine"""'}), "(query_engine, app_id='Direct Query Engine')\n", (4031, 4075), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((4503, 4621), 'utils.build_sentence_window_index', 'build_sentence_window_index', (['document', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""sentence_index"""'}), "(document, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='sentence_index')\n", (4530, 4621), False, 'from utils import build_sentence_window_index\n'), ((4713, 4761), 'utils.get_sentence_window_query_engine', 'get_sentence_window_query_engine', (['sentence_index'], {}), '(sentence_index)\n', (4745, 4761), False, 'from utils import get_sentence_window_query_engine\n'), ((4951, 5048), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['sentence_window_engine'], {'app_id': '"""Sentence Window Query Engine"""'}), "(sentence_window_engine, app_id=\n 'Sentence Window Query Engine')\n", (4980, 5048), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((5440, 5554), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""merging_index"""'}), "(documents, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='merging_index')\n", (5463, 5554), False, 'from utils import build_automerging_index\n'), ((5644, 5691), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', (['automerging_index'], {}), '(automerging_index)\n', (5672, 5691), False, 'from utils import get_automerging_query_engine\n'), ((5891, 5986), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['automerging_query_engine'], {'app_id': '"""Automerging Query Engine"""'}), "(automerging_query_engine, app_id=\n 'Automerging Query Engine')\n", (5920, 5986), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((545, 569), 'os.path.exists', 
'os.path.exists', (['filename'], {}), '(filename)\n', (559, 569), False, 'import os\n'), ((641, 658), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (653, 658), False, 'import requests\n'), ((1386, 1473), 'llama_cpp.Llama', 'Llama', ([], {'model_path': 'model_path', 'n_threads': '(2)', 'n_batch': '(512)', 'n_gpu_layers': '(43)', 'n_ctx': '(4096)'}), '(model_path=model_path, n_threads=2, n_batch=512, n_gpu_layers=43,\n n_ctx=4096)\n', (1391, 1473), False, 'from llama_cpp import Llama\n'), ((1772, 1831), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {}), "('ChanceFocus/finma-7b-full')\n", (1802, 1831), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((1842, 1927), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {'device_map': '"""auto"""'}), "('ChanceFocus/finma-7b-full', device_map='auto'\n )\n", (1874, 1927), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((2382, 2567), 'llama_index.llms.llama_utils.completion_to_prompt', 'completion_to_prompt', (['completion'], {'system_prompt': '"""You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided."""'}), "(completion, system_prompt=\n 'You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided.'\n )\n", (2402, 2567), False, 'from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt\n'), ((752, 797), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[filename]'}), '(input_files=[filename])\n', (773, 797), False, 'from llama_index import SimpleDirectoryReader\n')] |
"""Chat service module."""
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from app.api.database.models.message import MessageCreateModel
from app.api.services.message_service import MessageService
from app.api.services.ingest_service import ingest_service
import llama_index.core
llama_index.core.set_global_handler("simple")
class ChatService:
"""Chat Service class for chat operations."""
def __init__(self):
self.message_service = MessageService()
@staticmethod
def chat(query: str):
"""Chat with the document."""
        query_engine = ingest_service.index.as_query_engine(
            similarity_top_k=5, streaming=True, verbose=False
        )
        streaming_response = query_engine.query(query)
return streaming_response.response_gen
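    # `chat()` above is stateless: it answers a single query through a streaming query
    # engine. `conversation()` below rebuilds the chat history for a session from the
    # database and answers through a context chat engine with memory.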
def conversation(self, query: str, session_id: str):
"""Get answer from the chat engine."""
history = self.message_service.get_messages_by_session_id(session_id)
chat_history = []
if history.messages:
for message in history.messages:
chat_history.append(
ChatMessage(
                        content=message.message,
role=(
MessageRole.USER
if message.sender == "user"
else MessageRole.ASSISTANT
),
)
)
memory = ChatMemoryBuffer.from_defaults(
chat_history=chat_history, token_limit=8000
)
chat_engine = ingest_service.index.as_chat_engine(
chat_mode="context",
memory=memory,
similarity_top_k=5,
verbose=False,
system_prompt=(
"""\
 You are a chatbot. You MUST NOT provide any information unless it is in the Context, previous messages, or general conversation. If the user asks something you don't know, say that you cannot answer. \
 You MUST keep the answers short and simple. \
"""
),
)
response = chat_engine.stream_chat(message=query)
for token in response.response_gen:
yield token
self.message_service.create_message(
message=MessageCreateModel(
session_id=session_id,
message=query,
sender="user",
)
)
self.message_service.create_message(
message=MessageCreateModel(
session_id=session_id,
message=str(response),
sender="assistant",
)
)
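

# A hedged wiring sketch (illustrative only; the route path and query-parameter handling
# are assumptions): how the token generator returned by `conversation()` can be exposed
# as a streaming FastAPI endpoint.
def build_example_chat_router():
    from fastapi import APIRouter
    from fastapi.responses import StreamingResponse

    router = APIRouter()
    chat_service = ChatService()

    @router.get("/chat/{session_id}")
    def stream_conversation(session_id: str, query: str):
        return StreamingResponse(
            chat_service.conversation(query=query, session_id=session_id),
            media_type="text/plain",
        )

    return router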
| [
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.memory.ChatMemoryBuffer.from_defaults"
] | [((532, 548), 'app.api.services.message_service.MessageService', 'MessageService', ([], {}), '()\n', (546, 548), False, 'from app.api.services.message_service import MessageService\n'), ((655, 746), 'app.api.services.ingest_service.ingest_service.index.as_query_engine', 'ingest_service.index.as_query_engine', ([], {'similarity_top_k': '(5)', 'streaming': '(True)', 'verbose': '(False)'}), '(similarity_top_k=5, streaming=True,\n verbose=False)\n', (691, 746), False, 'from app.api.services.ingest_service import ingest_service\n'), ((1542, 1617), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'chat_history': 'chat_history', 'token_limit': '(8000)'}), '(chat_history=chat_history, token_limit=8000)\n', (1572, 1617), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((1662, 2080), 'app.api.services.ingest_service.ingest_service.index.as_chat_engine', 'ingest_service.index.as_chat_engine', ([], {'chat_mode': '"""context"""', 'memory': 'memory', 'similarity_top_k': '(5)', 'verbose': '(False)', 'system_prompt': '""" You are a chatbot. You MUST NOT provide any information unless it is in the Context or previous messages or general conversation. If the user ask something you don\'t know, say that you cannot answer. you MUST keep the answers short and simple. """'}), '(chat_mode=\'context\', memory=memory,\n similarity_top_k=5, verbose=False, system_prompt=\n " You are a chatbot. You MUST NOT provide any information unless it is in the Context or previous messages or general conversation. If the user ask something you don\'t know, say that you cannot answer. you MUST keep the answers short and simple. "\n )\n', (1697, 2080), False, 'from app.api.services.ingest_service import ingest_service\n'), ((2374, 2445), 'app.api.database.models.message.MessageCreateModel', 'MessageCreateModel', ([], {'session_id': 'session_id', 'message': 'query', 'sender': '"""user"""'}), "(session_id=session_id, message=query, sender='user')\n", (2392, 2445), False, 'from app.api.database.models.message import MessageCreateModel\n'), ((1208, 1327), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'message': 'message.message', 'role': "(MessageRole.USER if message.sender == 'user' else MessageRole.ASSISTANT)"}), "(message=message.message, role=MessageRole.USER if message.\n sender == 'user' else MessageRole.ASSISTANT)\n", (1219, 1327), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import sys
from typing import Any
import llama_index
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from loguru import logger
from private_gpt.paths import docs_path
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.chunks_router import chunks_router
from private_gpt.server.completions.completions_router import completions_router
from private_gpt.server.embeddings.embeddings_router import embeddings_router
from private_gpt.server.health.health_router import health_router
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import settings
# Remove pre-configured logging handler
logger.remove(0)
# Create a new logging handler same as the pre-configured one but with the extra
# attribute `request_id`
logger.add(
sys.stdout,
level="INFO",
format=(
"<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
"<level>{level: <8}</level> | "
"<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | "
"ID: {extra[request_id]} - <level>{message}</level>"
),
)
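# The format above references `extra[request_id]`, so log calls are expected to come from
# a logger bound with that field (e.g. `logger.bind(request_id=...)` or a
# `logger.contextualize(request_id=...)` block, typically set up in request middleware);
# records without it will fail to format.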
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
# Start the API
with open(docs_path / "description.md") as description_file:
description = description_file.read()
tags_metadata = [
{
"name": "Ingestion",
"description": "High-level APIs covering document ingestion -internally "
"managing document parsing, splitting,"
"metadata extraction, embedding generation and storage- and ingested "
"documents CRUD."
"Each ingested document is identified by an ID that can be used to filter the "
"context"
"used in *Contextual Completions* and *Context Chunks* APIs.",
},
{
"name": "Contextual Completions",
"description": "High-level APIs covering contextual Chat and Completions. They "
"follow OpenAI's format, extending it to "
"allow using the context coming from ingested documents to create the "
"response. Internally"
"manage context retrieval, prompt engineering and the response generation.",
},
{
"name": "Context Chunks",
"description": "Low-level API that given a query return relevant chunks of "
"text coming from the ingested"
"documents.",
},
{
"name": "Embeddings",
"description": "Low-level API to obtain the vector representation of a given "
"text, using an Embeddings model."
"Follows OpenAI's embeddings API format.",
},
{
"name": "Health",
"description": "Simple health API to make sure the server is up and running.",
},
]
app = FastAPI()
def custom_openapi() -> dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="Momentus GPT",
description=description,
version="0.1.0",
summary="PrivateGPT is a production-ready AI project that allows you to "
"ask questions to your documents using the power of Large Language "
"Models (LLMs), even in scenarios without Internet connection. "
"100% private, no data leaves your execution environment at any point.",
contact={
"url": "https://github.com/imartinez/privateGPT",
},
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
routes=app.routes,
tags=tags_metadata,
)
openapi_schema["info"]["x-logo"] = {
"url": "https://lh3.googleusercontent.com/drive-viewer"
"/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGj"
"E1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560"
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi # type: ignore[method-assign]
app.include_router(completions_router)
app.include_router(chat_router)
app.include_router(chunks_router)
app.include_router(ingest_router)
app.include_router(embeddings_router)
app.include_router(health_router)
if settings.ui.enabled:
from private_gpt.ui.ui import mount_in_app
mount_in_app(app)
| [
"llama_index.set_global_handler"
] | [((774, 790), 'loguru.logger.remove', 'logger.remove', (['(0)'], {}), '(0)\n', (787, 790), False, 'from loguru import logger\n'), ((897, 1147), 'loguru.logger.add', 'logger.add', (['sys.stdout'], {'level': '"""INFO"""', 'format': '"""<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | ID: {extra[request_id]} - <level>{message}</level>"""'}), "(sys.stdout, level='INFO', format=\n '<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | ID: {extra[request_id]} - <level>{message}</level>'\n )\n", (907, 1147), False, 'from loguru import logger\n'), ((1241, 1281), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (1271, 1281), False, 'import llama_index\n'), ((2819, 2828), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2826, 2828), False, 'from fastapi import FastAPI\n'), ((2953, 3521), 'fastapi.openapi.utils.get_openapi', 'get_openapi', ([], {'title': '"""Momentus GPT"""', 'description': 'description', 'version': '"""0.1.0"""', 'summary': '"""PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point."""', 'contact': "{'url': 'https://github.com/imartinez/privateGPT'}", 'license_info': "{'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}", 'routes': 'app.routes', 'tags': 'tags_metadata'}), "(title='Momentus GPT', description=description, version='0.1.0',\n summary=\n 'PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.'\n , contact={'url': 'https://github.com/imartinez/privateGPT'},\n license_info={'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}, routes=app.routes,\n tags=tags_metadata)\n", (2964, 3521), False, 'from fastapi.openapi.utils import get_openapi\n'), ((4319, 4336), 'private_gpt.ui.ui.mount_in_app', 'mount_in_app', (['app'], {}), '(app)\n', (4331, 4336), False, 'from private_gpt.ui.ui import mount_in_app\n')] |
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
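# Illustrative only (values invented): the kind of structured object the extraction
# program below is expected to produce for each table.
_example_table_info = TableInfo(
    table_name="example_albums",
    table_summary="Example: albums with their release year and artist.",
)
print(_example_table_info)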
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
def _get_tableinfo_with_index(idx: int) -> str:
results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
results_list = list(results_gen)
if len(results_list) == 0:
return None
elif len(results_list) == 1:
path = results_list[0]
return TableInfo.parse_file(path)
else:
raise ValueError(
f"More than one file matching index: {list(results_gen)}"
)
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
pass
out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
json.dump(table_info.dict(), open(out_file, "w"))
table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
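# Quick doctest-style check (column name invented): runs of non-word characters
# collapse to single underscores.
assert sanitize_column_name("Date of Birth (Y/M/D)") == "Date_of_Birth_Y_M_D_"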
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
for t in table_infos
] # add a SQLTableSchema for each table
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_str(table_schema_objs: List[SQLTableSchema]):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_str)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import FnComponent
from llama_index.core.llms import ChatResponse
def parse_response_to_sql(response: ChatResponse) -> str:
"""Parse response to SQL."""
response = response.message.content
sql_query_start = response.find("SQLQuery:")
if sql_query_start != -1:
response = response[sql_query_start:]
if response.startswith("SQLQuery:"):
response = response[len("SQLQuery:") :]
sql_result_start = response.find("SQLResult:")
if sql_result_start != -1:
response = response[:sql_result_start]
return response.strip().strip("```").strip()
sql_parser_component = FnComponent(fn=parse_response_to_sql)
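# Sanity check with an invented completion in the "SQLQuery: ... SQLResult: ..." layout
# that DEFAULT_TEXT_TO_SQL_PROMPT asks for, to show what the parser above strips away.
from llama_index.core.llms import ChatMessage

_example_completion = ChatResponse(
    message=ChatMessage(
        role="assistant",
        content="SQLQuery: SELECT * FROM Bad_Boy_Artists LIMIT 3\nSQLResult: ...",
    )
)
print(parse_response_to_sql(_example_completion))  # -> SELECT * FROM Bad_Boy_Artists LIMIT 3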
text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format(
dialect=engine.dialect.name
)
print(text2sql_prompt.template)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n"
"SQL: {sql_query}\n"
"SQL Response: {context_str}\n"
"Response: "
)
response_synthesis_prompt = PromptTemplate(
response_synthesis_prompt_str,
)
llm = OpenAI(model="gpt-3.5-turbo")
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": InputComponent(),
"table_retriever": obj_retriever,
"table_output_parser": table_parser_component,
"text2sql_prompt": text2sql_prompt,
"text2sql_llm": llm,
"sql_output_parser": sql_parser_component,
"sql_retriever": sql_retriever,
"response_synthesis_prompt": response_synthesis_prompt,
"response_synthesis_llm": llm,
},
verbose=True,
)
qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
"sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
"sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")
response = qp.run(
query="What was the year that The Notorious B.I.G was signed to Bad Boy?"
)
print(str(response))
response = qp.run(query="Who won best director in the 1972 academy awards")
print(str(response))
response = qp.run(query="What was the term of Pasquale Preziosa?")
print(str(response))
from llama_index.core import VectorStoreIndex, load_index_from_storage
from sqlalchemy import text
from llama_index.core.schema import TextNode
from llama_index.core import StorageContext
import os
from pathlib import Path
from typing import Dict
def index_all_tables(
sql_database: SQLDatabase, table_index_dir: str = "table_index_dir"
) -> Dict[str, VectorStoreIndex]:
"""Index all tables."""
if not Path(table_index_dir).exists():
os.makedirs(table_index_dir)
vector_index_dict = {}
engine = sql_database.engine
for table_name in sql_database.get_usable_table_names():
print(f"Indexing rows in table: {table_name}")
if not os.path.exists(f"{table_index_dir}/{table_name}"):
with engine.connect() as conn:
cursor = conn.execute(text(f'SELECT * FROM "{table_name}"'))
result = cursor.fetchall()
row_tups = []
for row in result:
row_tups.append(tuple(row))
nodes = [TextNode(text=str(t)) for t in row_tups]
index = VectorStoreIndex(nodes)
index.set_index_id("vector_index")
index.storage_context.persist(f"{table_index_dir}/{table_name}")
else:
storage_context = StorageContext.from_defaults(
persist_dir=f"{table_index_dir}/{table_name}"
)
index = load_index_from_storage(
storage_context, index_id="vector_index"
)
vector_index_dict[table_name] = index
return vector_index_dict
vector_index_dict = index_all_tables(sql_database)
test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever(
similarity_top_k=1
)
nodes = test_retriever.retrieve("P. Diddy")
print(nodes[0].get_content())
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_and_rows_str(
query_str: str, table_schema_objs: List[SQLTableSchema]
):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
vector_retriever = vector_index_dict[
table_schema_obj.table_name
].as_retriever(similarity_top_k=2)
relevant_nodes = vector_retriever.retrieve(query_str)
if len(relevant_nodes) > 0:
table_row_context = "\nHere are some relevant example rows (values in the same order as columns above)\n"
for node in relevant_nodes:
table_row_context += str(node.get_content()) + "\n"
table_info += table_row_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_and_rows_str)
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": InputComponent(),
"table_retriever": obj_retriever,
"table_output_parser": table_parser_component,
"text2sql_prompt": text2sql_prompt,
"text2sql_llm": llm,
"sql_output_parser": sql_parser_component,
"sql_retriever": sql_retriever,
"response_synthesis_prompt": response_synthesis_prompt,
"response_synthesis_llm": llm,
},
verbose=True,
)
qp.add_link("input", "table_retriever")
qp.add_link("input", "table_output_parser", dest_key="query_str")
qp.add_link(
"table_retriever", "table_output_parser", dest_key="table_schema_objs"
)
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
"sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
"sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")
response = qp.run(
query="What was the year that The Notorious BIG was signed to Bad Boy?"
)
print(str(response))
 | [
"llama_index.core.SQLDatabase",
"llama_index.llms.openai.OpenAI",
"llama_index.core.query_pipeline.FnComponent",
"llama_index.core.VectorStoreIndex",
"llama_index.core.objects.SQLTableNodeMapping",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.query_pipeline.InputComponent",
"llama_index.core.prompts.default_prompts.DEFAULT_TEXT_TO_SQL_PROMPT.partial_format",
"llama_index.core.objects.SQLTableSchema",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.core.objects.ObjectIndex.from_objects",
"llama_index.core.retrievers.SQLRetriever"
] | [((323, 363), 'pathlib.Path', 'Path', (['"""./WikiTableQuestions/csv/200-csv"""'], {}), "('./WikiTableQuestions/csv/200-csv')\n", (327, 363), False, 'from pathlib import Path\n'), ((3874, 3909), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (3887, 3909), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((3925, 3935), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (3933, 3935), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((4195, 4210), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (4208, 4210), True, 'import phoenix as px\n'), ((4446, 4465), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (4457, 4465), False, 'from llama_index.core import SQLDatabase, VectorStoreIndex\n'), ((4488, 4521), 'llama_index.core.objects.SQLTableNodeMapping', 'SQLTableNodeMapping', (['sql_database'], {}), '(sql_database)\n', (4507, 4521), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((4696, 4781), 'llama_index.core.objects.ObjectIndex.from_objects', 'ObjectIndex.from_objects', (['table_schema_objs', 'table_node_mapping', 'VectorStoreIndex'], {}), '(table_schema_objs, table_node_mapping,\n VectorStoreIndex)\n', (4720, 4781), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((5005, 5031), 'llama_index.core.retrievers.SQLRetriever', 'SQLRetriever', (['sql_database'], {}), '(sql_database)\n', (5017, 5031), False, 'from llama_index.core.retrievers import SQLRetriever\n'), ((5628, 5665), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'get_table_context_str'}), '(fn=get_table_context_str)\n', (5639, 5665), False, 'from llama_index.core.query_pipeline import FnComponent\n'), ((6454, 6491), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'parse_response_to_sql'}), '(fn=parse_response_to_sql)\n', (6465, 6491), False, 'from llama_index.core.query_pipeline import FnComponent\n'), ((6511, 6581), 'llama_index.core.prompts.default_prompts.DEFAULT_TEXT_TO_SQL_PROMPT.partial_format', 'DEFAULT_TEXT_TO_SQL_PROMPT.partial_format', ([], {'dialect': 'engine.dialect.name'}), '(dialect=engine.dialect.name)\n', (6552, 6581), False, 'from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT\n'), ((6871, 6916), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['response_synthesis_prompt_str'], {}), '(response_synthesis_prompt_str)\n', (6885, 6916), False, 'from llama_index.core import PromptTemplate\n'), ((6932, 6961), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (6938, 6961), False, 'from llama_index.llms.openai import OpenAI\n'), ((8202, 8264), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (8209, 8264), False, 'from pyvis.network import Network\n'), ((10585, 10611), 'llama_index.core.retrievers.SQLRetriever', 'SQLRetriever', (['sql_database'], {}), '(sql_database)\n', (10597, 10611), False, 'from llama_index.core.retrievers import SQLRetriever\n'), ((11737, 11783), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'get_table_context_and_rows_str'}), '(fn=get_table_context_and_rows_str)\n', (11748, 11783), False, 'from 
llama_index.core.query_pipeline import FnComponent\n'), ((13153, 13215), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (13160, 13215), False, 'from pyvis.network import Network\n'), ((1015, 1087), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""table name (must be underscores and NO spaces)"""'}), "(..., description='table name (must be underscores and NO spaces)')\n", (1020, 1087), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1127, 1196), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""short, concise summary/caption of the table"""'}), "(..., description='short, concise summary/caption of the table')\n", (1132, 1196), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3182, 3211), 're.sub', 're.sub', (['"""\\\\W+"""', '"""_"""', 'col_name'], {}), "('\\\\W+', '_', col_name)\n", (3188, 3211), False, 'import re\n'), ((3586, 3627), 'sqlalchemy.Table', 'Table', (['table_name', 'metadata_obj', '*columns'], {}), '(table_name, metadata_obj, *columns)\n', (3591, 3627), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((4548, 4616), 'llama_index.core.objects.SQLTableSchema', 'SQLTableSchema', ([], {'table_name': 't.table_name', 'context_str': 't.table_summary'}), '(table_name=t.table_name, context_str=t.table_summary)\n', (4562, 4616), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((520, 541), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (531, 541), True, 'import pandas as pd\n'), ((1634, 1663), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1640, 1663), False, 'from llama_index.llms.openai import OpenAI\n'), ((3460, 3513), 'sqlalchemy.Column', 'Column', (['col', "(String if dtype == 'object' else Integer)"], {}), "(col, String if dtype == 'object' else Integer)\n", (3466, 3513), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((9087, 9115), 'os.makedirs', 'os.makedirs', (['table_index_dir'], {}), '(table_index_dir)\n', (9098, 9115), False, 'import os\n'), ((1785, 1804), 'pathlib.Path', 'Path', (['tableinfo_dir'], {}), '(tableinfo_dir)\n', (1789, 1804), False, 'from pathlib import Path\n'), ((7135, 7151), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (7149, 7151), False, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent, CustomQueryComponent\n'), ((9308, 9357), 'os.path.exists', 'os.path.exists', (['f"""{table_index_dir}/{table_name}"""'], {}), "(f'{table_index_dir}/{table_name}')\n", (9322, 9357), False, 'import os\n'), ((9719, 9742), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {}), '(nodes)\n', (9735, 9742), False, 'from llama_index.core import VectorStoreIndex, load_index_from_storage\n'), ((9912, 9987), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'f"""{table_index_dir}/{table_name}"""'}), "(persist_dir=f'{table_index_dir}/{table_name}')\n", (9940, 9987), False, 'from llama_index.core import StorageContext\n'), ((10038, 10103), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': '"""vector_index"""'}), 
"(storage_context, index_id='vector_index')\n", (10061, 10103), False, 'from llama_index.core import VectorStoreIndex, load_index_from_storage\n'), ((11957, 11973), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (11971, 11973), False, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent, CustomQueryComponent\n'), ((9047, 9068), 'pathlib.Path', 'Path', (['table_index_dir'], {}), '(table_index_dir)\n', (9051, 9068), False, 'from pathlib import Path\n'), ((9440, 9477), 'sqlalchemy.text', 'text', (['f"""SELECT * FROM "{table_name}\\""""'], {}), '(f\'SELECT * FROM "{table_name}"\')\n', (9444, 9477), False, 'from sqlalchemy import text\n')] |
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager([]), verbose=True
)
task = agent.create_task(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
step_output = agent.run_step(task.task_id)
step_output.is_last
response = agent.finalize_response(task.task_id)
print(str(response))
agent.reset()
response = agent.chat(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
from llama_index.core.agent import Task, AgentChatResponse
from typing import Dict, Any
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
)
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict:
"""Agent input function."""
if "convo_history" not in state:
state["convo_history"] = []
state["count"] = 0
state["convo_history"].append(f"User: {task.input}")
convo_history_str = "\n".join(state["convo_history"]) or "None"
return {"input": task.input, "convo_history": convo_history_str}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core import PromptTemplate
retry_prompt_str = """\
You are trying to generate a proper natural language query given a user input.
This query will then be interpreted by a downstream text-to-SQL agent which
will convert the query to a SQL statement. If the agent triggers an error,
then that will be reflected in the current conversation history (see below).
If the conversation history is None, use the user input. If it's not None,
generate a new SQL query that avoids the problems of the previous SQL query.
Input: {input}
Convo history (failed attempts):
{convo_history}
New input: """
retry_prompt = PromptTemplate(retry_prompt_str)
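# Illustrative only (question and history invented): what the retry prompt looks like
# after one failed attempt has been appended to convo_history by the surrounding components.
_example_retry = retry_prompt.format(
    input="How many albums does AC/DC have?",
    convo_history=(
        "User: How many albums does AC/DC have?\n"
        "Assistant (inferred SQL query): SELECT COUNT(*) FROM albums WHERE artist = 'AC/DC'\n"
        "Assistant (response): Error: no such column: artist"
    ),
)
print(_example_retry)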
from llama_index.core import Response
from typing import Tuple
validate_prompt_str = """\
Given the user query, validate whether the inferred SQL query and the response from executing the query are correct and answer the query.
Answer with YES or NO.
Query: {input}
Inferred SQL query: {sql_query}
SQL Response: {sql_response}
Result: """
validate_prompt = PromptTemplate(validate_prompt_str)
MAX_ITER = 3
def agent_output_fn(
task: Task, state: Dict[str, Any], output: Response
) -> Tuple[AgentChatResponse, bool]:
"""Agent output component."""
print(f"> Inferred SQL Query: {output.metadata['sql_query']}")
print(f"> SQL Response: {str(output)}")
state["convo_history"].append(
f"Assistant (inferred SQL query): {output.metadata['sql_query']}"
)
state["convo_history"].append(f"Assistant (response): {str(output)}")
validate_prompt_partial = validate_prompt.as_query_component(
partial={
"sql_query": output.metadata["sql_query"],
"sql_response": str(output),
}
)
qp = QP(chain=[validate_prompt_partial, llm])
validate_output = qp.run(input=task.input)
state["count"] += 1
is_done = False
if state["count"] >= MAX_ITER:
is_done = True
if "YES" in validate_output.message.content:
is_done = True
return AgentChatResponse(response=str(output)), is_done
agent_output_component = AgentFnComponent(fn=agent_output_fn)
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
qp = QP(
modules={
"input": agent_input_component,
"retry_prompt": retry_prompt,
"llm": llm,
"sql_query_engine": sql_query_engine,
"output_component": agent_output_component,
},
verbose=True,
)
qp.add_link("input", "retry_prompt", src_key="input", dest_key="input")
qp.add_link(
"input", "retry_prompt", src_key="convo_history", dest_key="convo_history"
)
qp.add_chain(["retry_prompt", "llm", "sql_query_engine", "output_component"])
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager(), verbose=False
)
response = agent.chat(
"How many albums did the artist who wrote 'Restless and Wild' release? (answer should be non-zero)?"
)
print(str(response))
 | [
"llama_index.core.SQLDatabase",
"llama_index.core.agent.react.types.ObservationReasoningStep",
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage",
"llama_index.core.tools.QueryEngineTool.from_defaults",
"llama_index.core.agent.QueryPipelineAgentWorker",
"llama_index.core.agent.ReActChatFormatter",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.query_pipeline.AgentInputComponent",
"llama_index.core.agent.react.output_parser.ReActOutputParser",
"llama_index.core.PromptTemplate",
"llama_index.core.query_pipeline.QueryPipeline",
"llama_index.core.agent.AgentChatResponse",
"llama_index.core.query_engine.NLSQLTableQueryEngine",
"llama_index.core.query_pipeline.AgentFnComponent",
"llama_index.core.query_pipeline.ToolRunnerComponent"
] | [((183, 220), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///chinook.db"""'], {}), "('sqlite:///chinook.db')\n", (196, 220), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, column\n'), ((236, 255), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (247, 255), False, 'from llama_index.core import SQLDatabase\n'), ((679, 696), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (694, 696), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((790, 805), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (803, 805), True, 'import phoenix as px\n'), ((997, 1103), 'llama_index.core.query_engine.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'sql_database': 'sql_database', 'tables': "['albums', 'tracks', 'artists']", 'verbose': '(True)'}), "(sql_database=sql_database, tables=['albums', 'tracks',\n 'artists'], verbose=True)\n", (1018, 1103), False, 'from llama_index.core.query_engine import NLSQLTableQueryEngine\n'), ((1126, 1293), 'llama_index.core.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'sql_query_engine', 'name': '"""sql_tool"""', 'description': '"""Useful for translating a natural language query into a SQL query"""'}), "(query_engine=sql_query_engine, name=\n 'sql_tool', description=\n 'Useful for translating a natural language query into a SQL query')\n", (1155, 1293), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((1388, 1404), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'verbose': '(True)'}), '(verbose=True)\n', (1390, 1404), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((2476, 2514), 'llama_index.core.query_pipeline.AgentInputComponent', 'AgentInputComponent', ([], {'fn': 'agent_input_fn'}), '(fn=agent_input_fn)\n', (2495, 2514), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((3093, 3165), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'react_prompt_fn', 'partial_dict': "{'tools': [sql_tool]}"}), "(fn=react_prompt_fn, partial_dict={'tools': [sql_tool]})\n", (3109, 3165), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((3738, 3780), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'parse_react_output_fn'}), '(fn=parse_react_output_fn)\n', (3754, 3780), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((4412, 4444), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'run_tool_fn'}), '(fn=run_tool_fn)\n', (4428, 4444), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((4936, 4976), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'process_response_fn'}), '(fn=process_response_fn)\n', (4952, 4976), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((5244, 5290), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'process_agent_response_fn'}), '(fn=process_agent_response_fn)\n', (5260, 5290), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((6286, 6348), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 
'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (6293, 6348), False, 'from pyvis.network import Network\n'), ((6549, 6577), 'llama_index.core.agent.QueryPipelineAgentWorker', 'QueryPipelineAgentWorker', (['qp'], {}), '(qp)\n', (6573, 6577), False, 'from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner\n'), ((7133, 7167), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (7139, 7167), False, 'from llama_index.llms.openai import OpenAI\n'), ((7770, 7808), 'llama_index.core.query_pipeline.AgentInputComponent', 'AgentInputComponent', ([], {'fn': 'agent_input_fn'}), '(fn=agent_input_fn)\n', (7789, 7808), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((8437, 8469), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['retry_prompt_str'], {}), '(retry_prompt_str)\n', (8451, 8469), False, 'from llama_index.core import PromptTemplate\n'), ((8829, 8864), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['validate_prompt_str'], {}), '(validate_prompt_str)\n', (8843, 8864), False, 'from llama_index.core import PromptTemplate\n'), ((9886, 9922), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'agent_output_fn'}), '(fn=agent_output_fn)\n', (9902, 9922), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((10034, 10224), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'modules': "{'input': agent_input_component, 'retry_prompt': retry_prompt, 'llm': llm,\n 'sql_query_engine': sql_query_engine, 'output_component':\n agent_output_component}", 'verbose': '(True)'}), "(modules={'input': agent_input_component, 'retry_prompt': retry_prompt,\n 'llm': llm, 'sql_query_engine': sql_query_engine, 'output_component':\n agent_output_component}, verbose=True)\n", (10036, 10224), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((10563, 10625), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (10570, 10625), False, 'from pyvis.network import Network\n'), ((10820, 10848), 'llama_index.core.agent.QueryPipelineAgentWorker', 'QueryPipelineAgentWorker', (['qp'], {}), '(qp)\n', (10844, 10848), False, 'from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner\n'), ((2314, 2362), 'llama_index.core.agent.react.types.ObservationReasoningStep', 'ObservationReasoningStep', ([], {'observation': 'task.input'}), '(observation=task.input)\n', (2338, 2362), False, 'from llama_index.core.agent.react.types import ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep\n'), ((2868, 2888), 'llama_index.core.agent.ReActChatFormatter', 'ReActChatFormatter', ([], {}), '()\n', (2886, 2888), False, 'from llama_index.core.agent import ReActChatFormatter\n'), ((3545, 3564), 'llama_index.core.agent.react.output_parser.ReActOutputParser', 'ReActOutputParser', ([], {}), '()\n', (3562, 3564), False, 'from llama_index.core.agent.react.output_parser import ReActOutputParser\n'), ((3950, 4021), 'llama_index.core.query_pipeline.ToolRunnerComponent', 'ToolRunnerComponent', (['[sql_tool]'], {'callback_manager': 'task.callback_manager'}), '([sql_tool], callback_manager=task.callback_manager)\n', (3969, 4021), False, 'from llama_index.core.query_pipeline import 
AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent\n'), ((9535, 9575), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'chain': '[validate_prompt_partial, llm]'}), '(chain=[validate_prompt_partial, llm])\n', (9537, 9575), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((4698, 4752), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': 'task.input', 'role': 'MessageRole.USER'}), '(content=task.input, role=MessageRole.USER)\n', (4709, 4752), False, 'from llama_index.core.llms import ChatMessage\n'), ((4787, 4848), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': 'response_str', 'role': 'MessageRole.ASSISTANT'}), '(content=response_str, role=MessageRole.ASSISTANT)\n', (4798, 4848), False, 'from llama_index.core.llms import ChatMessage\n'), ((5127, 5175), 'llama_index.core.agent.AgentChatResponse', 'AgentChatResponse', (["response_dict['response_str']"], {}), "(response_dict['response_str'])\n", (5144, 5175), False, 'from llama_index.core.agent import Task, AgentChatResponse\n'), ((5533, 5567), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (5539, 5567), False, 'from llama_index.llms.openai import OpenAI\n'), ((6634, 6653), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6649, 6653), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((10905, 10922), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (10920, 10922), False, 'from llama_index.core.callbacks import CallbackManager\n')] |
import requests
import pandas as pd
from bs4 import BeautifulSoup
import os
from llama_index import SimpleDirectoryReader,GPTListIndex,GPTVectorStoreIndex,LLMPredictor,PromptHelper,ServiceContext,StorageContext
from langchain import OpenAI
import openai
import llama_index
# from main import secret_key
# with open('key.txt','r') as f:
# secret_key=f.read().strip()
# os.environ["OPENAI_API_KEY"]=secret_key
# secret_key = os.getenv('api_key')
Base_Dir=os.getcwd()
from PyPDF2 import PdfReader,PdfWriter
def api_status(key):
# Set your OpenAI API key
# os.environ('OPENAI_API')
# openai.api_key="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"
openai.api_key=key
# Try to create a completion
try:
response = openai.Completion.create(
engine="text-davinci-001",
prompt="What is the meaning of life?",
temperature=0.5,
max_tokens=60,
top_p=0.3,
frequency_penalty=0.5,
presence_penalty=0.0,
)
except openai.OpenAIError as e:
return False
else:
return True
def get_chat_response(question,api_key):
# API endpoint
url = 'https://api.openai.com/v1/chat/completions'
# Your OpenAI API key
# api_key = secret_key
# Request headers
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {api_key}'
}
# Request payload
payload = {
'model': 'gpt-3.5-turbo',
'messages': [{'role': 'system', 'content': 'You are a helpful assistant.'},
{'role': 'user', 'content': question}]
}
# Send POST request to the API
response = requests.post(url, headers=headers, json=payload)
# Parse the response
data = response.json()
# return data
try:
reply= data['choices'][0]['message']['content']
# Get the model's reply
# reply = data['choices'][0]['message']['content']
return reply
except Exception as e:
print(e)
return None
def company_with_url(company_name):
csv_path=os.path.join(Base_Dir,'companies_data.csv')
from sentence_transformers import SentenceTransformer,util
encode_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
df=pd.read_csv(csv_path)
companies=list(df['company'])
companies_urls=list(df['screener url'])
encoded_names=encode_model.encode(companies)
cos=util.cos_sim(encode_model.encode(company_name.split()[0]),encoded_names)
    # pick the catalogued company whose name embedding is closest to the query
    similar = [score.item() for score in cos[0]]
    index = similar.index(max(similar))
# m=0
# index=0
# for i in range(len(cos[0])):
# if m<cos[0][i].item():
# index=i
# m=cos[0][i]
company=companies[index]
screener_url=companies_urls[index]
return (company,screener_url)
def report_url(url):
soup_annual=BeautifulSoup(requests.get(url).content,'html.parser')
annual_urls=[i.get('href') for i in soup_annual.find_all('a')]
annual_reports=[]
for i in annual_urls:
        if i and 'Annual' in i and '#' not in i:
annual_reports.append(i)
annual_report_2022=annual_reports[0]
return annual_report_2022
def autodownload_report(url,company):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36'
}
response = requests.get(url, stream=True,headers=headers)
folder_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report')
if not os.path.exists(folder_path):
        os.makedirs(folder_path)
print('folder created')
pdf_path=os.path.join(Base_Dir,f'{company}_2022.pdf')
# print(pdf_path)
with open(pdf_path, "wb") as f:
for chunk in response.iter_content(chunk_size=1024):
f.write(chunk)
return
def pdf2txt(pdf_path,company):
if not os.path.exists(os.path.join(Base_Dir,f'Annual_reports/{company}_report/{company}_2022.txt')):
titles = ['STANDALONE BALANCE SHEET', 'STANDALONE STATEMENT OF PROFIT AND LOSS', 'Balance Sheet', 'Balance Sheet (contd.)', 'Statement of Profit and Loss', 'Statement of Profit and Loss (contd.)']
with open(pdf_path, 'rb') as pdf_file:
# Create a PDF reader object
pdf_reader = PdfReader(pdf_file)
text=''
pdf_writer = PdfWriter()
page_no=0
for page in pdf_reader.pages:
page_content=page.extract_text()
page_no+=1
for word in titles:
if word in page_content:
# print(page_no)
text+=page.extract_text()
pdf_writer.add_page(page)
with open(f'{company}_imp.pdf', 'wb') as output_file:
pdf_writer.write(output_file)
txt_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report/{company}_2022.txt')
with open(txt_path,'w',encoding='utf-8') as f:
f.write(text)
print('created txt file')
pdf_path=os.path.join(Base_Dir,f'{company}_2022.pdf')
os.remove(pdf_path)
print('removed pdf file')
return
import base64
def display_pdf(pdf_file):
with open(pdf_file, "rb") as f:
base64_pdf = base64.b64encode(f.read()).decode('utf-8')
pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="500" type="application/pdf"></iframe>'
return pdf_display
def create_index(company,secret_key):
import openai
vstore_path=os.path.join(Base_Dir,f'vector_stores/{company}_vstore')
doc_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report')
if not os.path.exists(vstore_path):
os.mkdir(vstore_path)
max_input=4096
tokens=200
chunk_size=600
max_chunk_overlap=20
promptHelpter=PromptHelper(max_input,max_chunk_overlap,chunk_size_limit=chunk_size)
openai.api_key=secret_key
llmPredictor=LLMPredictor(llm=OpenAI(temperature=0,model_name='text-ada-001',max_tokens=tokens))
docs=SimpleDirectoryReader(doc_path).load_data()
service_context=ServiceContext.from_defaults(llm_predictor=llmPredictor,prompt_helper=promptHelpter)
openai.api_key=secret_key
vectorIndex=GPTVectorStoreIndex.from_documents(documents=docs)
vectorIndex.storage_context.persist(persist_dir=vstore_path)
return
def load_index(vstore_path):
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=vstore_path)
# load index
index = llama_index.load_index_from_storage(storage_context)
return index
# print(index)
def give_answer(index,que):
return index.as_query_engine().query(que)
def answerMe(question,company):
vstore_path=os.path.join(Base_Dir,f'vector_stores/{company}_vstore')
storage_context=StorageContext.from_defaults(persist_dir=vstore_path)
# index=load_index_from_storage(storage_context)
index=llama_index.load_index_from_storage(storage_context)
query_engine=index.as_query_engine()
response=query_engine.query(question)
return response.response
def balance(url):
dfs = pd.read_html(url)
return dfs[6]
def shareholding(url):
dfs = pd.read_html(url)
return dfs[10]
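# Usage sketch (company name is just an example input; assumes companies_data.csv is
# present and the screener.in page layout is unchanged): fetch and convert one annual report.
if __name__ == "__main__":
    company, screener_url = company_with_url("Reliance Industries")
    annual_report_url = report_url(screener_url)
    autodownload_report(annual_report_url, company)
    pdf2txt(os.path.join(Base_Dir, f"{company}_2022.pdf"), company)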
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((490, 501), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (499, 501), False, 'import os\n'), ((1741, 1790), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'json': 'payload'}), '(url, headers=headers, json=payload)\n', (1754, 1790), False, 'import requests\n'), ((2167, 2211), 'os.path.join', 'os.path.join', (['Base_Dir', '"""companies_data.csv"""'], {}), "(Base_Dir, 'companies_data.csv')\n", (2179, 2211), False, 'import os\n'), ((2293, 2339), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""paraphrase-MiniLM-L6-v2"""'], {}), "('paraphrase-MiniLM-L6-v2')\n", (2312, 2339), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((2347, 2368), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2358, 2368), True, 'import pandas as pd\n'), ((3497, 3544), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'headers': 'headers'}), '(url, stream=True, headers=headers)\n', (3509, 3544), False, 'import requests\n'), ((3561, 3619), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report')\n", (3573, 3619), False, 'import os\n'), ((5768, 5825), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""vector_stores/{company}_vstore"""'], {}), "(Base_Dir, f'vector_stores/{company}_vstore')\n", (5780, 5825), False, 'import os\n'), ((5838, 5896), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report')\n", (5850, 5896), False, 'import os\n'), ((6782, 6835), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vstore_path'}), '(persist_dir=vstore_path)\n', (6810, 6835), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6865, 6917), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (6900, 6917), False, 'import llama_index\n'), ((7079, 7136), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""vector_stores/{company}_vstore"""'], {}), "(Base_Dir, f'vector_stores/{company}_vstore')\n", (7091, 7136), False, 'import os\n'), ((7156, 7209), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vstore_path'}), '(persist_dir=vstore_path)\n', (7184, 7209), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((7273, 7325), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (7308, 7325), False, 'import llama_index\n'), ((7473, 7490), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7485, 7490), True, 'import pandas as pd\n'), ((7543, 7560), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7555, 7560), True, 'import pandas as pd\n'), ((7609, 7626), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7621, 7626), True, 'import pandas as pd\n'), ((782, 970), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-001"""', 'prompt': '"""What is the meaning of life?"""', 'temperature': '(0.5)', 'max_tokens': '(60)', 'top_p': '(0.3)', 'frequency_penalty': '(0.5)', 'presence_penalty': '(0.0)'}), "(engine='text-davinci-001', prompt=\n 'What is the meaning of 
life?', temperature=0.5, max_tokens=60, top_p=\n 0.3, frequency_penalty=0.5, presence_penalty=0.0)\n", (806, 970), False, 'import openai\n'), ((3631, 3658), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (3645, 3658), False, 'import os\n'), ((3668, 3689), 'os.mkdir', 'os.mkdir', (['folder_path'], {}), '(folder_path)\n', (3676, 3689), False, 'import os\n'), ((3739, 3784), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""{company}_2022.pdf"""'], {}), "(Base_Dir, f'{company}_2022.pdf')\n", (3751, 3784), False, 'import os\n'), ((5053, 5130), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report/{company}_2022.txt"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report/{company}_2022.txt')\n", (5065, 5130), False, 'import os\n'), ((5262, 5307), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""{company}_2022.pdf"""'], {}), "(Base_Dir, f'{company}_2022.pdf')\n", (5274, 5307), False, 'import os\n'), ((5315, 5334), 'os.remove', 'os.remove', (['pdf_path'], {}), '(pdf_path)\n', (5324, 5334), False, 'import os\n'), ((5907, 5934), 'os.path.exists', 'os.path.exists', (['vstore_path'], {}), '(vstore_path)\n', (5921, 5934), False, 'import os\n'), ((5945, 5966), 'os.mkdir', 'os.mkdir', (['vstore_path'], {}), '(vstore_path)\n', (5953, 5966), False, 'import os\n'), ((6092, 6163), 'llama_index.PromptHelper', 'PromptHelper', (['max_input', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size'}), '(max_input, max_chunk_overlap, chunk_size_limit=chunk_size)\n', (6104, 6163), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6409, 6499), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llmPredictor', 'prompt_helper': 'promptHelpter'}), '(llm_predictor=llmPredictor, prompt_helper=\n promptHelpter)\n', (6437, 6499), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6548, 6598), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', ([], {'documents': 'docs'}), '(documents=docs)\n', (6582, 6598), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((2964, 2981), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2976, 2981), False, 'import requests\n'), ((4028, 4105), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report/{company}_2022.txt"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report/{company}_2022.txt')\n", (4040, 4105), False, 'import os\n'), ((4437, 4456), 'PyPDF2.PdfReader', 'PdfReader', (['pdf_file'], {}), '(pdf_file)\n', (4446, 4456), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((4510, 4521), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (4519, 4521), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((6243, 6310), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-ada-001"""', 'max_tokens': 'tokens'}), "(temperature=0, model_name='text-ada-001', max_tokens=tokens)\n", (6249, 6310), False, 'from langchain import OpenAI\n'), ((6332, 6363), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['doc_path'], {}), '(doc_path)\n', (6353, 6363), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, 
PromptHelper, ServiceContext, StorageContext\n')] |
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
bulk_data = False
# ----------------------------------
async def connect_to_elasticsearch():
# Instantiate the Elasticsearch client right away to check we can connect
from elasticsearch import AsyncElasticsearch
es_client = AsyncElasticsearch(
[os.getenv("ES_URL")],
ssl_assert_fingerprint=os.getenv("ES_CERTIFICATE_FINGERPRINT"),
basic_auth=(os.getenv("ES_USERNAME"), os.getenv("ES_PASSWORD"))
)
await es_client.info() # this connects to the cluster and gets its version
if bulk_data:
await es_client.indices.delete(index=os.getenv("ES_DEFAULT_INDEX"), ignore=[400, 404])
return es_client
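# Expected .env settings, inferred from the os.getenv calls above (values below are
# placeholders, not real credentials):
#   OPENAI_API_KEY=sk-...
#   ES_URL=https://localhost:9200
#   ES_CERTIFICATE_FINGERPRINT=AA:BB:CC:...
#   ES_USERNAME=elastic
#   ES_PASSWORD=changeme
#   ES_DEFAULT_INDEX=documents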
def load_data(es_client):
from llama_index import SimpleDirectoryReader
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler,
CBEventType,
)
# Creates a reader for the /data folder
if bulk_data:
documents = SimpleDirectoryReader("python/data").load_data(show_progress=True)
# Creates the ES vector store
from llama_index.vector_stores import ElasticsearchStore
ES_DEFAULT_INDEX = os.getenv("ES_DEFAULT_INDEX")
es_vector_store = ElasticsearchStore(
index_name=ES_DEFAULT_INDEX,
es_client=es_client
)
# Service ctx for debug
from llama_index import ServiceContext
from llama_index.llms import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager = CallbackManager([llama_debug])
service_context = ServiceContext.from_defaults(
callback_manager=callback_manager, llm=llm
)
# Creates the index
import llama_index
llama_index.set_global_handler("simple")
from llama_index import VectorStoreIndex
from llama_index.storage.storage_context import StorageContext
storage_context = StorageContext.from_defaults(vector_store=es_vector_store)
if bulk_data:
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
else:
index = VectorStoreIndex.from_vector_store(vector_store=es_vector_store, service_context=service_context)
return index
async def main():
es_client = await connect_to_elasticsearch()
index = load_data(es_client)
# set Logging to DEBUG for more detailed outputs
query_engine = index.as_query_engine()
# What is Prince and what can you tell me about Hyphenation?
while (True):
question = input("Enter your question: ")
if question == "":
question = "what is the address of the bank of yes logic?"
response = query_engine.query(question)
print("**************************** REFERENCES ****************************")
print("Refs " + str(response.source_nodes))
print("**************************** Q&A ****************************")
print("Q: " + question)
print("A: " + str(response))
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ElasticsearchStore",
"llama_index.SimpleDirectoryReader",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_handler",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.callbacks.CallbackManager"
] | [((27, 86), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (46, 86), False, 'import logging\n'), ((202, 215), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (213, 215), False, 'from dotenv import load_dotenv\n'), ((234, 261), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (243, 261), False, 'import os\n'), ((3378, 3402), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3400, 3402), False, 'import asyncio\n'), ((118, 158), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (139, 158), False, 'import logging\n'), ((1411, 1440), 'os.getenv', 'os.getenv', (['"""ES_DEFAULT_INDEX"""'], {}), "('ES_DEFAULT_INDEX')\n", (1420, 1440), False, 'import os\n'), ((1464, 1532), 'llama_index.vector_stores.ElasticsearchStore', 'ElasticsearchStore', ([], {'index_name': 'ES_DEFAULT_INDEX', 'es_client': 'es_client'}), '(index_name=ES_DEFAULT_INDEX, es_client=es_client)\n', (1482, 1532), False, 'from llama_index.vector_stores import ElasticsearchStore\n'), ((1683, 1727), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (1689, 1727), False, 'from llama_index.llms import OpenAI\n'), ((1746, 1788), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (1763, 1788), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((1812, 1842), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (1827, 1842), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((1866, 1938), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm': 'llm'}), '(callback_manager=callback_manager, llm=llm)\n', (1894, 1938), False, 'from llama_index import ServiceContext\n'), ((2005, 2045), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (2035, 2045), False, 'import llama_index\n'), ((2182, 2240), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'es_vector_store'}), '(vector_store=es_vector_store)\n', (2210, 2240), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((87, 106), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (104, 106), False, 'import logging\n'), ((2280, 2392), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2311, 2392), False, 'from llama_index import VectorStoreIndex\n'), ((2437, 2538), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'es_vector_store', 'service_context': 'service_context'}), '(vector_store=es_vector_store,\n service_context=service_context)\n', (2471, 2538), False, 'from llama_index import VectorStoreIndex\n'), ((530, 549), 'os.getenv', 'os.getenv', (['"""ES_URL"""'], {}), "('ES_URL')\n", (539, 549), False, 'import os\n'), ((583, 622), 'os.getenv', 'os.getenv', 
(['"""ES_CERTIFICATE_FINGERPRINT"""'], {}), "('ES_CERTIFICATE_FINGERPRINT')\n", (592, 622), False, 'import os\n'), ((644, 668), 'os.getenv', 'os.getenv', (['"""ES_USERNAME"""'], {}), "('ES_USERNAME')\n", (653, 668), False, 'import os\n'), ((670, 694), 'os.getenv', 'os.getenv', (['"""ES_PASSWORD"""'], {}), "('ES_PASSWORD')\n", (679, 694), False, 'import os\n'), ((1225, 1261), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""python/data"""'], {}), "('python/data')\n", (1246, 1261), False, 'from llama_index import SimpleDirectoryReader\n'), ((857, 886), 'os.getenv', 'os.getenv', (['"""ES_DEFAULT_INDEX"""'], {}), "('ES_DEFAULT_INDEX')\n", (866, 886), False, 'import os\n')] |
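# --- Hedged variation sketch (refers to the Elasticsearch script above; not part of it) ---
# The script answers questions synchronously through index.as_query_engine(). A small sketch
# of the streaming variant of the same llama_index API, assuming `index` is the object
# returned by load_data(es_client); the question is reused from the script's default prompt.
streaming_engine = index.as_query_engine(streaming=True)
streaming_response = streaming_engine.query("what is the address of the bank of yes logic?")
streaming_response.print_response_stream()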
"""FastAPI app creation, logger configuration and main API routes."""
from typing import Any
import llama_index
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from private_gpt.paths import docs_path
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.chunks_router import chunks_router
from private_gpt.server.completions.completions_router import completions_router
from private_gpt.server.embeddings.embeddings_router import embeddings_router
from private_gpt.server.health.health_router import health_router
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import settings
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
# Start the API
with open(docs_path / "description.md") as description_file:
description = description_file.read()
tags_metadata = [
    {
        "name": "Ingestion",
        "description": "High-level APIs covering document ingestion -internally "
        "managing document parsing, splitting, "
        "metadata extraction, embedding generation and storage- and ingested "
        "documents CRUD. "
        "Each ingested document is identified by an ID that can be used to filter the "
        "context "
        "used in *Contextual Completions* and *Context Chunks* APIs.",
    },
    {
        "name": "Contextual Completions",
        "description": "High-level APIs covering contextual Chat and Completions. They "
        "follow OpenAI's format, extending it to "
        "allow using the context coming from ingested documents to create the "
        "response. Internally "
        "manage context retrieval, prompt engineering and the response generation.",
    },
    {
        "name": "Context Chunks",
        "description": "Low-level API that, given a query, returns relevant chunks of "
        "text coming from the ingested "
        "documents.",
    },
    {
        "name": "Embeddings",
        "description": "Low-level API to obtain the vector representation of a given "
        "text, using an Embeddings model. "
"Follows OpenAI's embeddings API format.",
},
{
"name": "Health",
"description": "Simple health API to make sure the server is up and running.",
},
]
app = FastAPI()
def custom_openapi() -> dict[str, Any]:
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="PrivateGPT",
description=description,
version="0.1.0",
summary="PrivateGPT is a production-ready AI project that allows you to "
"ask questions to your documents using the power of Large Language "
"Models (LLMs), even in scenarios without Internet connection. "
"100% private, no data leaves your execution environment at any point.",
contact={
"url": "https://github.com/imartinez/privateGPT",
},
license_info={
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
routes=app.routes,
tags=tags_metadata,
)
openapi_schema["info"]["x-logo"] = {
"url": "https://lh3.googleusercontent.com/drive-viewer"
"/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGj"
"E1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560"
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi # type: ignore[method-assign]
app.include_router(completions_router)
app.include_router(chat_router)
app.include_router(chunks_router)
app.include_router(ingest_router)
app.include_router(embeddings_router)
app.include_router(health_router)
if settings.ui.enabled:
from private_gpt.ui.ui import mount_in_app
mount_in_app(app)
| [
"llama_index.set_global_handler"
] | [((735, 775), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (765, 775), False, 'import llama_index\n'), ((2313, 2322), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2320, 2322), False, 'from fastapi import FastAPI\n'), ((2447, 3013), 'fastapi.openapi.utils.get_openapi', 'get_openapi', ([], {'title': '"""PrivateGPT"""', 'description': 'description', 'version': '"""0.1.0"""', 'summary': '"""PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point."""', 'contact': "{'url': 'https://github.com/imartinez/privateGPT'}", 'license_info': "{'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}", 'routes': 'app.routes', 'tags': 'tags_metadata'}), "(title='PrivateGPT', description=description, version='0.1.0',\n summary=\n 'PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.'\n , contact={'url': 'https://github.com/imartinez/privateGPT'},\n license_info={'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}, routes=app.routes,\n tags=tags_metadata)\n", (2458, 3013), False, 'from fastapi.openapi.utils import get_openapi\n'), ((3811, 3828), 'private_gpt.ui.ui.mount_in_app', 'mount_in_app', (['app'], {}), '(app)\n', (3823, 3828), False, 'from private_gpt.ui.ui import mount_in_app\n')] |
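# --- Hedged usage sketch (refers to the PrivateGPT FastAPI module above; not part of it) ---
# The module only builds the FastAPI `app`; it is normally served by an ASGI server.
# A minimal sketch using uvicorn; the module path, host and port are illustrative
# assumptions rather than values taken from the project configuration.
import uvicorn
from private_gpt.main import app  # assumed module path for the file above

uvicorn.run(app, host="0.0.0.0", port=8001)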
import llama_index
from .di import global_injector
from .launcher import create_app
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((86, 126), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (116, 126), False, 'import llama_index\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.llm import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
tranform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
transformations=tranform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
| [
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.extractors.loading.load_extractor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.embeddings.utils.resolve_embed_model",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.llm_predictor.loading.load_predictor"
] | [((1018, 1045), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1035, 1045), False, 'import logging\n'), ((1820, 1877), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1850, 1877), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7504, 7536), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7523, 7536), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((11065, 11097), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11084, 11097), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14239, 14289), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14253, 14289), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14313, 14363), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14329, 14363), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14389, 14447), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14411, 14447), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6248, 6267), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6263, 6267), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6435, 6451), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6446, 6451), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6883, 6949), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6895, 6949), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8412, 8425), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8423, 8425), False, 'from llama_index.logger import LlamaLogger\n'), ((10360, 10376), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10371, 10376), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10405, 10426), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10417, 10426), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1419, 1436), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1434, 1436), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14623, 14645), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14634, 14645), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14717, 14742), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14731, 14742), False, 'from llama_index.extractors.loading import load_extractor\n')] |
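# --- Hedged usage sketch (refers to the ServiceContext module above; not part of it) ---
# ServiceContext.from_defaults() and set_global_service_context() are the public entry
# points defined above. A minimal sketch of wiring them together; the model name and
# chunk_size are illustrative assumptions.
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms import OpenAI

service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo"),
    chunk_size=512,
)
set_global_service_context(service_context)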
import os
import time
import openai
import requests
from dotenv import load_dotenv
from llama_index.core import Settings
import llama_index.core
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.pinecone import PineconeVectorStore
from logger import AppLogger
from pinecone import Pinecone
from utils import get_logger
from constant import *
load_dotenv()
logging = get_logger()
class OpenAIChat:
def __init__(self):
self.setup_openai_api()
self.setup_pinecone()
self.setup_llama_index_settings()
self.setup_chat_engine()
def setup_openai_api(self):
"""
Set up the OpenAI API by setting the API key from the environment variable and configuring the OpenAI API with the key.
"""
self.openai_api_key = os.getenv("OPENAI_API_KEY")
openai.api_key = self.openai_api_key
logging.info("OpenAI API has been set up.")
def setup_pinecone(self):
"""
        Sets up Pinecone by retrieving the API key from the environment variables and creating a Pinecone index.
"""
pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_index = self.create_pinecone_index(pinecone_api_key)
logging.info("Pinecone has been set up.")
def create_pinecone_index(self, pinecone_api_key):
"""
Create a Pinecone index using the provided Pinecone API key.
Args:
pinecone_api_key (str): The API key for accessing the Pinecone service.
Returns:
Pinecone Index: The created Pinecone index for the specified collection.
"""
pc = Pinecone(api_key=pinecone_api_key)
index = pc.Index("chatbot1")
logging.info("Pinecone index has been created.")
return index
def setup_llama_index_settings(self):
"""
Set up the llama index settings including creating an OpenAI model, setting various settings, and logging the result.
"""
llm = OpenAI(model="gpt-3.5-turbo-0125", temperature=0)
Settings.llm = llm
Settings.embed_model = OpenAIEmbedding(embed_batch_size=50)
Settings.text_splitter = SentenceSplitter()
Settings.transformations = [SentenceSplitter(chunk_size=1024)]
logging.info("Llama index settings have been set up.")
def setup_chat_engine(self):
"""
Initializes the chat engine by loading documents, vector store, and creating chat engine.
Then, it reindexes the documents.
"""
self.documents = self.load_documents()
self.base_index = self.load_vector_store()
self.chat_engine_base = self.create_chat_engine()
self.reindex_documents()
logging.info("Chat engine has been initialized and documents have been reindexed.")
def reindex_documents(self):
"""
Reindexes documents if requested, and logs the status of the reindexing process.
"""
# reindex_requested = os.getenv("REINDEX", "no").lower() == "yes"
if REINDEX:
self.base_index = self.index_documents()
logging.info("Documents have been reindexed as requested.")
else:
logging.info("Reindexing not requested. Using the existing index.")
def initialize_pinecone_vector_store(self):
"""
Initializes the pinecone vector store using the provided pinecone index and returns a PineconeVectorStore object.
"""
store = PineconeVectorStore(pinecone_index=self.pinecone_index)
logging.info("Pinecone vector store has been initialized.")
return store
def index_documents(self):
"""
Indexes documents and returns the base index.
Returns:
VectorStoreIndex: The base index of the indexed documents.
"""
vector_store = self.initialize_pinecone_vector_store()
base_node_parser = SentenceSplitter()
base_nodes = base_node_parser.get_nodes_from_documents(self.documents)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
base_index = VectorStoreIndex(base_nodes, storage_context=storage_context)
logging.info("Documents have been indexed and saved.")
return base_index
def load_documents(self):
"""
Load documents from a simple directory reader and log the number of documents loaded.
:return: The loaded documents.
"""
documents = SimpleDirectoryReader(SOP_DATA_PATH).load_data()
logging.info(f"Loaded {len(documents)} documents.")
return documents
def load_vector_store(self):
"""
Load the vector store and create a vector store index from it.
:return: Vector store index
"""
vector_store = PineconeVectorStore(pinecone_index=self.pinecone_index)
index = VectorStoreIndex.from_vector_store(vector_store)
logging.info("Vector store has been loaded and index has been created.")
return index
def create_chat_engine(self):
"""
Create a chat engine using the system prompt and return it.
"""
system_prompt = self.get_system_prompt()
chat_engine = self.base_index.as_chat_engine(
chat_mode="context",
streaming=True,
memory=ChatMemoryBuffer.from_defaults(token_limit=3000),
system_prompt=system_prompt,
)
logging.info("Chat engine has been created.")
return chat_engine
def get_system_prompt(self):
"""
Get the system prompt for Crest Data Systems' assistant.
"""
        return (
            "As Crest Data Systems' assistant, provide precise, complete answers and engage smoothly. "
            "Note that you should answer user queries based on the documents you have indexed. Ensure to give your answer in well-defined steps. "
            "Ensure to answer all the questions with respect to Crest Data Systems. "
"If you don't know the correct answer, prepend the following at the start of the response: Although I couldn't find anything in our knowledge base, here are the general steps to follow. and append the following at the end of the answer: Please contact Crest IT support at IT Helpdesk google chat for further assistance."
)
def reset_chat_engine(self):
"""
Reset the chat engine.
"""
self.chat_engine_base.reset()
# Function to interact with the chat engine
def ask_and_respond(self, user_query):
"""
Function to ask a user query and get a response from the chat engine.
:param user_query: the query entered by the user
:return: the response stream from the chat engine
"""
start_time = time.time()
response_stream = self.chat_engine_base.stream_chat(user_query)
end_time = time.time()
logging.info(f"Time taken for this response: {end_time - start_time}")
return response_stream
def extract_response_data(self, response):
"""Extracts the response data from the API response."""
if response.status_code == 200:
return response.json()["choices"][0]["message"]["content"]
else:
return "API request failed with status code: " + str(response.status_code)
def start_image_processing(self, image_base64, user_query=DEFAULT_USER_QUERY, image_type="image/png"):
"""Processes an image given in base64 encoding and a user query to generate a response using OpenAI's Vision API."""
url = "https://api.openai.com/v1/chat/completions"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.openai_api_key,
}
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "What is the overall issue that user is facing in this image? Start with I am facing an issue.... Pay special attention to the software name and any errors contained in the screenshot. Give a detailed 1 liner answer.",
},
{
"type": "image_url",
"image_url": {
"url": f"data:{image_type};base64,{image_base64}"
},
},
],
}
],
"max_tokens": 300,
}
response = requests.post(url, headers=headers, json=payload)
content = self.extract_response_data(response)
user_query = user_query + " " + content
logging.info("User query after image processing: " + user_query)
response = self.ask_and_respond(user_query)
        return response
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.memory.ChatMemoryBuffer.from_defaults",
"llama_index.core.StorageContext.from_defaults",
"llama_index.vector_stores.pinecone.PineconeVectorStore",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.indices.vector_store.base.VectorStoreIndex",
"llama_index.core.indices.vector_store.base.VectorStoreIndex.from_vector_store",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((675, 688), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (686, 688), False, 'from dotenv import load_dotenv\n'), ((700, 712), 'utils.get_logger', 'get_logger', ([], {}), '()\n', (710, 712), False, 'from utils import get_logger\n'), ((1108, 1135), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1117, 1135), False, 'import os\n'), ((1432, 1461), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (1441, 1461), False, 'import os\n'), ((1951, 1985), 'pinecone.Pinecone', 'Pinecone', ([], {'api_key': 'pinecone_api_key'}), '(api_key=pinecone_api_key)\n', (1959, 1985), False, 'from pinecone import Pinecone\n'), ((2312, 2361), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0125"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo-0125', temperature=0)\n", (2318, 2361), False, 'from llama_index.llms.openai import OpenAI\n'), ((2420, 2456), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'embed_batch_size': '(50)'}), '(embed_batch_size=50)\n', (2435, 2456), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2490, 2508), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (2506, 2508), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((3814, 3869), 'llama_index.vector_stores.pinecone.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'self.pinecone_index'}), '(pinecone_index=self.pinecone_index)\n', (3833, 3869), False, 'from llama_index.vector_stores.pinecone import PineconeVectorStore\n'), ((4252, 4270), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4268, 4270), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((4376, 4431), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4404, 4431), False, 'from llama_index.core import SimpleDirectoryReader, StorageContext\n'), ((4453, 4514), 'llama_index.core.indices.vector_store.base.VectorStoreIndex', 'VectorStoreIndex', (['base_nodes'], {'storage_context': 'storage_context'}), '(base_nodes, storage_context=storage_context)\n', (4469, 4514), False, 'from llama_index.core.indices.vector_store.base import VectorStoreIndex\n'), ((5145, 5200), 'llama_index.vector_stores.pinecone.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'self.pinecone_index'}), '(pinecone_index=self.pinecone_index)\n', (5164, 5200), False, 'from llama_index.vector_stores.pinecone import PineconeVectorStore\n'), ((5217, 5265), 'llama_index.core.indices.vector_store.base.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (5251, 5265), False, 'from llama_index.core.indices.vector_store.base import VectorStoreIndex\n'), ((7144, 7155), 'time.time', 'time.time', ([], {}), '()\n', (7153, 7155), False, 'import time\n'), ((7247, 7258), 'time.time', 'time.time', ([], {}), '()\n', (7256, 7258), False, 'import time\n'), ((9031, 9080), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'json': 'payload'}), '(url, headers=headers, json=payload)\n', (9044, 9080), False, 'import requests\n'), ((2545, 2578), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)'}), '(chunk_size=1024)\n', (2561, 2578), False, 'from llama_index.core.node_parser import 
SentenceSplitter\n'), ((4818, 4854), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['SOP_DATA_PATH'], {}), '(SOP_DATA_PATH)\n', (4839, 4854), False, 'from llama_index.core import SimpleDirectoryReader, StorageContext\n'), ((5678, 5726), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(3000)'}), '(token_limit=3000)\n', (5708, 5726), False, 'from llama_index.core.memory import ChatMemoryBuffer\n')] |
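# --- Hedged usage sketch (refers to the OpenAIChat class above; not part of it) ---
# OpenAIChat wires Pinecone, the chat engine and the Vision helper together. A minimal
# driver sketch; the question is an illustrative assumption, and iterating over tokens
# relies on the streaming chat response exposing `response_gen`, which should be treated
# as an assumption about the llama_index version targeted above.
chat = OpenAIChat()
stream = chat.ask_and_respond("How do I reset my VPN access?")
for token in stream.response_gen:
    print(token, end="", flush=True)
chat.reset_chat_engine()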
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from auth_RAG.di import global_injector
from auth_RAG.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((211, 251), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (241, 251), False, 'import llama_index\n'), ((259, 286), 'auth_RAG.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (269, 286), False, 'from auth_RAG.launcher import create_app\n')] |
import llama_index
from pydantic import BaseModel
from typing import List
from typing import Optional
class Section(BaseModel):
section_id: str
section_text: str
vector_representation: Optional[List[float]]
keywords: Optional[List[str]]
named_entities: Optional[List[str]]
summary: Optional[str]
sentiment: Optional[float]
document_id: str
    def to_llama_format(self):
        """Converts the CogniSphere Section into a llama_index Document."""
extra_info = {
"section_id": self.section_id or "",
"document_id": self.document_id or "",
"summary": self.summary or "",
"sentiment": self.sentiment or "",
"keywords": ", ".join(self.keywords) if self.keywords else "",
"named_entities": ", ".join(self.named_entities)
if self.named_entities
else "",
}
return llama_index.Document(
text=self.section_text or "",
doc_id=f"{self.document_id}-{self.section_id}"
if self.document_id and self.section_id
else "",
extra_info=extra_info,
embedding=self.vector_representation or [],
)
class Document(BaseModel):
id: str
document_id: str
title: Optional[str]
author: Optional[str]
publication_date: Optional[str]
genre: Optional[str]
publisher: Optional[str]
language: Optional[str]
isbn: Optional[str]
summary: Optional[str]
vector_representation: Optional[List[float]]
sections: List[Section]
def get_section_keywords(self):
section_keywords = [
keyword
for section in self.sections
if section.keywords
for keyword in section.keywords
]
# remove duplicates
return list(set(section_keywords))
def get_section_named_entities(self):
section_named_entities = [
entity
for section in self.sections
if section.named_entities
for entity in section.named_entities
]
# remove duplicates
return list(set(section_named_entities))
def get_text(self):
return " ".join(section.section_text for section in self.sections)
| [
"llama_index.Document"
] | [((918, 1146), 'llama_index.Document', 'llama_index.Document', ([], {'text': "(self.section_text or '')", 'doc_id': "(f'{self.document_id}-{self.section_id}' if self.document_id and self.\n section_id else '')", 'extra_info': 'extra_info', 'embedding': '(self.vector_representation or [])'}), "(text=self.section_text or '', doc_id=\n f'{self.document_id}-{self.section_id}' if self.document_id and self.\n section_id else '', extra_info=extra_info, embedding=self.\n vector_representation or [])\n", (938, 1146), False, 'import llama_index\n')] |
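# --- Hedged usage sketch (refers to the Section/Document models above; not part of them) ---
# Section.to_llama_format() already returns llama_index Documents, so a Document's sections
# can be indexed directly. A minimal sketch; `doc` stands for an already populated Document
# instance and the question is an illustrative assumption.
from llama_index import VectorStoreIndex

llama_docs = [section.to_llama_format() for section in doc.sections]
index = VectorStoreIndex.from_documents(llama_docs)
print(index.as_query_engine().query("What is the overall summary of this document?"))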
## create graph
from pyvis.network import Network
import llama_index.core
from llama_index.core import StorageContext, load_index_from_storage
storage_context = StorageContext.from_defaults(persist_dir="math_index_persist")
index = load_index_from_storage(storage_context)
# retriever = llama_index.core.indices.knowledge_graph.KGTableRetriever(index)
g = index.get_networkx_graph()
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(g)
net.show("example.html")
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage"
] | [((162, 224), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""math_index_persist"""'}), "(persist_dir='math_index_persist')\n", (190, 224), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((233, 273), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (256, 273), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((391, 453), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (398, 453), False, 'from pyvis.network import Network\n')] |
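# --- Hedged usage sketch (refers to the graph script above; not part of it) ---
# The same persisted index that feeds the pyvis graph can also be queried directly.
# A minimal sketch using the standard llama_index.core query-engine API; the question
# is an illustrative assumption.
query_engine = index.as_query_engine()
print(query_engine.query("Which results rely on the triangle inequality?"))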
import json
import tiktoken
from llama_index.query_pipeline.query import QueryPipeline
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate
import llama_index
import openai
import weaviate
from llama_index import SimpleDirectoryReader
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import VectorStoreIndex, ServiceContext, load_index_from_storage, StorageContext
import os
from llama_index import download_loader, VectorStoreIndex
azure_openai_key = os.getenv("AZURE_OPENAI_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
resource_name = os.getenv("RESOURCE_NAME")
azure_client = openai.lib.azure.AzureOpenAI(
azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
api_key=os.getenv("AZURE_OPENAI_KEY"),
api_version="2023-05-15"
)
headers = {
"X-Azure-Api-Key": azure_openai_key,
}
llama_in_weaviate_class = """ if lecture_id == "CIT5230000":
llm = llama_index.llms.AzureOpenAI(model="gpt-35-turbo-16k", deployment_name="gpt-35-16k",
api_key=azure_openai_key, azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview")
embed_model = llama_index.embeddings.AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="te-ada-002",
api_key=azure_openai_key,
azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview"
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
vector_store = WeaviateVectorStore(
weaviate_client=self.client, index_name="Lectures", text_key="content"
)
retriever = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context).as_retriever(
similarity_top_k=1
)
nodes = retriever.retrieve(generated_lecture)
pprint_source_node(nodes[0])
print(nodes[0].node.metadata)"""
prompt_str = """You're Iris, the AI programming tutor integrated into Artemis, the online learning platform of the
Technical University of Munich (TUM). You are a guide and an educator. Your main goal is to teach students
problem-solving skills using a programming exercise. Instead of solving tasks for them, you give subtle hints so they
solve their problem themselves.
This is the chat history of your conversation with the student so far. Read it so you know
what already happened, but never re-use any message you already wrote. Instead, always write
new and original responses.
{chat_history}
Now, consider the student's newest and latest input:
{user_message}
Here is the relevant context, that has the problem statement and the files of the repository: {context}
"""
prompt_str1 = """Now continue the ongoing conversation between you and the student by responding
to and focussing only on their latest input. Be an excellent educator. Instead of solving
tasks for them, give hints instead. Instead of sending code snippets, send subtle hints or
ask counter-questions. Do not let them outsmart you, no matter how hard they try.
Important Rules: - Ensure your answer is a direct answer to the latest message of the
student. It must be a valid answer as it would occur in a direct conversation between two
humans. DO NOT answer any previous questions that you already answered before. - DO NOT UNDER
ANY CIRCUMSTANCES repeat any message you have already sent before or send a similar message.
Your messages must ALWAYS BE NEW AND ORIGINAL. Think about alternative ways to guide the
student in these cases.
{text}"""
prompt_str2 = """ Review the response draft. I want you to rewrite it so it adheres to the
following rules. Only output the refined answer. Omit explanations. Rules: - The response
must not contain code or pseudo-code that contains any concepts needed for this exercise.
ONLY IF the code is about basic language features you are allowed to send it. - The response
must not contain step by step instructions - IF the student is asking for help about the
exercise or a solution for the exercise or similar, the response must be subtle hints towards
the solution or a counter-question to the student to make them think, or a mix of both. - The
response must not perform any work the student is supposed to do. - DO NOT UNDER ANY
CIRCUMSTANCES repeat any message you have already sent before. Your messages must ALWAYS BE
NEW AND ORIGINAL.
{text} """
def query_openai(messages):
return azure_client.chat.completions.create(
model="gpt-35-16k", # model = "deployment_name".
messages=messages
)
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
if not isinstance(value, str):
print(f"Warning: Non-string value encountered: {value}")
value = str(value) # Convert to string or handle as needed
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with assistant
return num_tokens
class AI:
def __init__(self):
self.message_history = []
self.index = None
self.query_engine = None
api_key_header = {
"X-Azure-Api-Key": azure_openai_key, # Replace with your inference API key
}
self.client = weaviate.Client(
url="http://localhost:8080", # Replace with your endpoint
additional_headers=api_key_header
)
self.docs = None
def create_class_weaviate(self):
t2v = {
"model": "ada",
"modelVersion": "002",
"type": "text",
"baseURL": azure_endpoint,
"resourceName": resource_name,
"deploymentId": "te-ada-002",
}
self.client.schema.delete_class("Repo")
if not self.client.schema.exists("Repo"):
class_obj = {
"class": "Repo",
"vectorizer": "text2vec-openai",
"moduleConfig": {
"text2vec-openai": t2v,
"generative-openai": {
"baseURL": azure_endpoint,
"resourceName": resource_name,
"deploymentId": "gpt-35-16k",
"waitForModel": True,
"useGPU": False,
"useCache": True
}
}
}
self.client.schema.create_class(class_obj)
print("Schema created")
def create_class_llama(self):
llm = llama_index.llms.AzureOpenAI(model="gpt-35-turbo-16k", deployment_name="gpt-35-16k",
api_key=azure_openai_key, azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview")
embed_model = llama_index.embeddings.AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="te-ada-002",
api_key=azure_openai_key,
azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview"
)
reader = SimpleDirectoryReader(input_dir="/MVP/Programming_exercise",
recursive=True)
self.docs = reader.load_data()
self.create_class_weaviate()
vector_store = WeaviateVectorStore(weaviate_client=self.client, index_name="Repo", text_key="content")
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, chunk_size=512,
chunk_overlap=50)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
self.index = VectorStoreIndex.from_documents(self.docs, storage_context=storage_context,
service_context=service_context)
self.query_engine = self.index.as_query_engine()
def generate_response(self, user_message):
if self.query_engine is None:
# The commented code is for querying with llamaindex
# vector_store = WeaviateVectorStore(
# weaviate_client=self.client, index_name="Repo"
# )
# loaded_index = VectorStoreIndex.from_vector_store(vector_store)
# query_engine = loaded_index.as_query_engine(
# vector_store_query_mode="hybrid", similarity_top_k=3, alpha=1
# )
# response = query_engine.query(user_message)
response = (
self.client.query
.get("Repo", ["content", "file_name"])
.with_near_text({"concepts": user_message})
.with_where({
"path": ["file_name"],
"operator": "Equal",
"valueText": "ProblemStatement"
})
.with_limit(3)
.do()
)
generated = response["data"]["Get"]["Repo"]
response = (
self.client.query
.get("Repo", ["content", "file_name"])
.with_near_text({"concepts": user_message})
.with_limit(3)
.do()
)
generated_response = response["data"]["Get"]["Repo"]
self.message_history.append({"role": "user", "content": user_message})
prompt_tmpl = PromptTemplate(prompt_str)
prompt_tmpl1 = PromptTemplate(prompt_str1)
prompt_tmpl2 = PromptTemplate(prompt_str2)
llm = OpenAI(model="gpt-3.5-turbo")
llm_c = llm.as_query_component(streaming=True)
p = QueryPipeline(chain=[prompt_tmpl, llm_c, prompt_tmpl1, llm_c, prompt_tmpl2, llm], verbose=True)
if num_tokens_from_messages(self.message_history) > 600:
            # list.append returns None, so append the summarization request first and
            # then send the updated history to the model.
            self.message_history.append({"role": "system", "content": "summarize the content above, "
                                                                      "keep only the relevant "
                                                                      "information, do not exceed "
                                                                      "600 tokens"})
            completion = query_openai(self.message_history)
            self.message_history = [{"role": "system",
                                     "content": completion.choices[0].message.content}]
        output = p.run(chat_history=str(self.message_history),
context=json.dumps(generated, indent=4) + "\n\n" + json.dumps(generated_response, indent=4),
user_message=user_message)
self.message_history.append({"role": "system", "content": output})
print(str(output))
return str(output)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.PromptTemplate",
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.query_pipeline.query.QueryPipeline",
"llama_index.llms.AzureOpenAI",
"llama_index.vector_stores.WeaviateVectorStore"
] | [((510, 539), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (519, 539), False, 'import os\n'), ((557, 591), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (566, 591), False, 'import os\n'), ((608, 634), 'os.getenv', 'os.getenv', (['"""RESOURCE_NAME"""'], {}), "('RESOURCE_NAME')\n", (617, 634), False, 'import os\n'), ((699, 733), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (708, 733), False, 'import os\n'), ((747, 776), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (756, 776), False, 'import os\n'), ((5657, 5691), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (5684, 5691), False, 'import tiktoken\n'), ((7787, 7866), 'weaviate.Client', 'weaviate.Client', ([], {'url': '"""http://localhost:8080"""', 'additional_headers': 'api_key_header'}), "(url='http://localhost:8080', additional_headers=api_key_header)\n", (7802, 7866), False, 'import weaviate\n'), ((9044, 9228), 'llama_index.llms.AzureOpenAI', 'llama_index.llms.AzureOpenAI', ([], {'model': '"""gpt-35-turbo-16k"""', 'deployment_name': '"""gpt-35-16k"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='gpt-35-turbo-16k', deployment_name=\n 'gpt-35-16k', api_key=azure_openai_key, azure_endpoint=azure_endpoint,\n api_version='2023-03-15-preview')\n", (9072, 9228), False, 'import llama_index\n'), ((9328, 9533), 'llama_index.embeddings.AzureOpenAIEmbedding', 'llama_index.embeddings.AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""te-ada-002"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='text-embedding-ada-002',\n deployment_name='te-ada-002', api_key=azure_openai_key, azure_endpoint=\n azure_endpoint, api_version='2023-03-15-preview')\n", (9371, 9533), False, 'import llama_index\n'), ((9612, 9688), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""/MVP/Programming_exercise"""', 'recursive': '(True)'}), "(input_dir='/MVP/Programming_exercise', recursive=True)\n", (9633, 9688), False, 'from llama_index import SimpleDirectoryReader\n'), ((9827, 9918), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'self.client', 'index_name': '"""Repo"""', 'text_key': '"""content"""'}), "(weaviate_client=self.client, index_name='Repo',\n text_key='content')\n", (9846, 9918), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((9941, 10042), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'chunk_size': '(512)', 'chunk_overlap': '(50)'}), '(llm=llm, embed_model=embed_model, chunk_size=\n 512, chunk_overlap=50)\n', (9969, 10042), False, 'from llama_index import VectorStoreIndex, ServiceContext, load_index_from_storage, StorageContext\n'), ((10119, 10174), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (10147, 10174), False, 'from llama_index import VectorStoreIndex, ServiceContext, load_index_from_storage, StorageContext\n'), ((10196, 10308), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.docs'], {'storage_context': 
'storage_context', 'service_context': 'service_context'}), '(self.docs, storage_context=storage_context,\n service_context=service_context)\n', (10227, 10308), False, 'from llama_index import download_loader, VectorStoreIndex\n'), ((5803, 5839), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (5824, 5839), False, 'import tiktoken\n'), ((11883, 11909), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (11897, 11909), False, 'from llama_index.prompts import PromptTemplate\n'), ((11937, 11964), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['prompt_str1'], {}), '(prompt_str1)\n', (11951, 11964), False, 'from llama_index.prompts import PromptTemplate\n'), ((11992, 12019), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['prompt_str2'], {}), '(prompt_str2)\n', (12006, 12019), False, 'from llama_index.prompts import PromptTemplate\n'), ((12039, 12068), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (12045, 12068), False, 'from llama_index.llms import OpenAI\n'), ((12144, 12243), 'llama_index.query_pipeline.query.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm_c, prompt_tmpl1, llm_c, prompt_tmpl2, llm]', 'verbose': '(True)'}), '(chain=[prompt_tmpl, llm_c, prompt_tmpl1, llm_c, prompt_tmpl2,\n llm], verbose=True)\n', (12157, 12243), False, 'from llama_index.query_pipeline.query import QueryPipeline\n'), ((13007, 13047), 'json.dumps', 'json.dumps', (['generated_response'], {'indent': '(4)'}), '(generated_response, indent=4)\n', (13017, 13047), False, 'import json\n'), ((12964, 12995), 'json.dumps', 'json.dumps', (['generated'], {'indent': '(4)'}), '(generated, indent=4)\n', (12974, 12995), False, 'import json\n')] |
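# --- Hedged usage sketch (refers to the AI class above; not part of it) ---
# The AI class indexes the exercise repository once and then answers chat messages through
# the three-stage prompt pipeline. A minimal driver sketch; the student message is an
# illustrative assumption.
ai = AI()
ai.create_class_llama()  # one-off: creates the Weaviate "Repo" class and builds the index
print(ai.generate_response("My for-loop never terminates, what am I missing?"))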
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, List, Optional
if TYPE_CHECKING:
from llama_index import ServiceContext
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.llms import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.node_parser import NodeParser, SentenceSplitter
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
@dataclass
class _Settings:
"""Settings for the Llama Index, lazily initialized."""
# lazy initialization
_llm: Optional[LLM] = None
_embed_model: Optional[BaseEmbedding] = None
_callback_manager: Optional[CallbackManager] = None
_tokenizer: Optional[Callable[[str], List[Any]]] = None
_node_parser: Optional[NodeParser] = None
_prompt_helper: Optional[PromptHelper] = None
_transformations: Optional[List[TransformComponent]] = None
# ---- LLM ----
@property
def llm(self) -> LLM:
"""Get the LLM."""
if self._llm is None:
self._llm = resolve_llm("default")
if self._callback_manager is not None:
self._llm.callback_manager = self._callback_manager
return self._llm
@llm.setter
def llm(self, llm: LLMType) -> None:
"""Set the LLM."""
self._llm = resolve_llm(llm)
@property
def pydantic_program_mode(self) -> PydanticProgramMode:
"""Get the pydantic program mode."""
return self.llm.pydantic_program_mode
@pydantic_program_mode.setter
def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None:
"""Set the pydantic program mode."""
self.llm.pydantic_program_mode = pydantic_program_mode
# ---- Embedding ----
@property
def embed_model(self) -> BaseEmbedding:
"""Get the embedding model."""
if self._embed_model is None:
self._embed_model = resolve_embed_model("default")
if self._callback_manager is not None:
self._embed_model.callback_manager = self._callback_manager
return self._embed_model
@embed_model.setter
def embed_model(self, embed_model: EmbedType) -> None:
"""Set the embedding model."""
self._embed_model = resolve_embed_model(embed_model)
# ---- Callbacks ----
@property
def global_handler(self) -> Optional[BaseCallbackHandler]:
"""Get the global handler."""
import llama_index.core
# TODO: deprecated?
return llama_index.global_handler
@global_handler.setter
def global_handler(self, eval_mode: str, **eval_params: Any) -> None:
"""Set the global handler."""
from llama_index import set_global_handler
# TODO: deprecated?
set_global_handler(eval_mode, **eval_params)
@property
def callback_manager(self) -> CallbackManager:
"""Get the callback manager."""
if self._callback_manager is None:
self._callback_manager = CallbackManager()
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set the callback manager."""
self._callback_manager = callback_manager
# ---- Tokenizer ----
@property
def tokenizer(self) -> Callable[[str], List[Any]]:
"""Get the tokenizer."""
import llama_index.core
if llama_index.global_tokenizer is None:
return get_tokenizer()
# TODO: deprecated?
return llama_index.global_tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None:
"""Set the tokenizer."""
try:
from transformers import PreTrainedTokenizerBase # pants: no-infer-dep
if isinstance(tokenizer, PreTrainedTokenizerBase):
from functools import partial
tokenizer = partial(tokenizer.encode, add_special_tokens=False)
except ImportError:
pass
# TODO: deprecated?
set_global_tokenizer(tokenizer)
# ---- Node parser ----
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
if self._node_parser is None:
self._node_parser = SentenceSplitter()
if self._callback_manager is not None:
self._node_parser.callback_manager = self._callback_manager
return self._node_parser
@node_parser.setter
def node_parser(self, node_parser: NodeParser) -> None:
"""Set the node parser."""
self._node_parser = node_parser
@property
def chunk_size(self) -> int:
"""Get the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
return self.node_parser.chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@chunk_size.setter
def chunk_size(self, chunk_size: int) -> None:
"""Set the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
self.node_parser.chunk_size = chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@property
def chunk_overlap(self) -> int:
"""Get the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
return self.node_parser.chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
@chunk_overlap.setter
def chunk_overlap(self, chunk_overlap: int) -> None:
"""Set the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
self.node_parser.chunk_overlap = chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
# ---- Node parser alias ----
@property
def text_splitter(self) -> NodeParser:
"""Get the text splitter."""
return self.node_parser
@text_splitter.setter
def text_splitter(self, text_splitter: NodeParser) -> None:
"""Set the text splitter."""
self.node_parser = text_splitter
# ---- Prompt helper ----
@property
def prompt_helper(self) -> PromptHelper:
"""Get the prompt helper."""
if self._llm is not None and self._prompt_helper is None:
self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
elif self._prompt_helper is None:
self._prompt_helper = PromptHelper()
return self._prompt_helper
@prompt_helper.setter
def prompt_helper(self, prompt_helper: PromptHelper) -> None:
"""Set the prompt helper."""
self._prompt_helper = prompt_helper
@property
def num_output(self) -> int:
"""Get the number of outputs."""
return self.prompt_helper.num_output
@num_output.setter
def num_output(self, num_output: int) -> None:
"""Set the number of outputs."""
self.prompt_helper.num_output = num_output
@property
def context_window(self) -> int:
"""Get the context window."""
return self.prompt_helper.context_window
@context_window.setter
def context_window(self, context_window: int) -> None:
"""Set the context window."""
self.prompt_helper.context_window = context_window
# ---- Transformations ----
@property
def transformations(self) -> List[TransformComponent]:
"""Get the transformations."""
if self._transformations is None:
self._transformations = [self.node_parser]
return self._transformations
@transformations.setter
def transformations(self, transformations: List[TransformComponent]) -> None:
"""Set the transformations."""
self._transformations = transformations
# Singleton
Settings = _Settings()
# -- Helper functions for deprecation/migration --
def llm_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> LLM:
"""Get settings from either settings or context."""
if context is not None:
return context.llm
return settings.llm
def embed_model_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> BaseEmbedding:
"""Get settings from either settings or context."""
if context is not None:
return context.embed_model
return settings.embed_model
def callback_manager_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> CallbackManager:
"""Get settings from either settings or context."""
if context is not None:
return context.callback_manager
return settings.callback_manager
def node_parser_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> NodeParser:
"""Get settings from either settings or context."""
if context is not None:
return context.node_parser
return settings.node_parser
def prompt_helper_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> PromptHelper:
"""Get settings from either settings or context."""
if context is not None:
return context.prompt_helper
return settings.prompt_helper
def transformations_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> List[TransformComponent]:
"""Get settings from either settings or context."""
if context is not None:
return context.transformations
return settings.transformations
| [
"llama_index.core.llms.utils.resolve_llm",
"llama_index.core.utils.get_tokenizer",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.set_global_handler",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.indices.prompt_helper.PromptHelper",
"llama_index.core.utils.set_global_tokenizer"
] | [((1680, 1696), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1691, 1696), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2626, 2658), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (2645, 2658), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3133, 3177), 'llama_index.set_global_handler', 'set_global_handler', (['eval_mode'], {}), '(eval_mode, **eval_params)\n', (3151, 3177), False, 'from llama_index import set_global_handler\n'), ((4433, 4464), 'llama_index.core.utils.set_global_tokenizer', 'set_global_tokenizer', (['tokenizer'], {}), '(tokenizer)\n', (4453, 4464), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((1414, 1436), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['"""default"""'], {}), "('default')\n", (1425, 1436), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2290, 2320), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['"""default"""'], {}), "('default')\n", (2309, 2320), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3364, 3381), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3379, 3381), False, 'from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((3846, 3861), 'llama_index.core.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (3859, 3861), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((4655, 4673), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4671, 4673), False, 'from llama_index.core.node_parser import NodeParser, SentenceSplitter\n'), ((6756, 6806), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (6786, 6806), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((4299, 4350), 'functools.partial', 'partial', (['tokenizer.encode'], {'add_special_tokens': '(False)'}), '(tokenizer.encode, add_special_tokens=False)\n', (4306, 4350), False, 'from functools import partial\n'), ((6883, 6897), 'llama_index.core.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (6895, 6897), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n')] |
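A brief hedged aside (not part of the dataset row above): a minimal sketch of how the Settings singleton is typically driven from user code, assuming llama-index-core is installed; the chunk sizes are arbitrary.
from llama_index.core import Settings
from llama_index.core.node_parser import SentenceSplitter
# Install an explicit node parser; chunk_size / chunk_overlap then proxy to it.
Settings.node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=50)
print(Settings.chunk_size)       # -> 512, read back through the node parser
Settings.chunk_size = 256        # setter forwards to node_parser.chunk_size
print(Settings.transformations)   # defaults to [node_parser] when unset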
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index", "import llama_index"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
with open(local_dir_path / "__init__.py", "r+") as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
loader_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, llama_index imports in the loader
            files are rewritten (effectively a no-op since the
            gpt_index -> llama_index rename). By default (False),
            the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5360, 5393), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5374, 5393), False, 'import os\n'), ((7213, 7281), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7233, 7281), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8659, 8686), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8680, 8686), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((4949, 4982), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (4963, 4982), False, 'import os\n'), ((8136, 8226), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8164, 8226), False, 'from importlib import util\n'), ((8376, 8478), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8404, 8478), False, 'from importlib import util\n'), ((9109, 9211), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9122, 9211), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5645, 5737), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5666, 5737), False, 'import subprocess\n'), ((5485, 5508), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5489, 5508), False, 'from pathlib import Path\n')] |
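A hedged usage sketch for the module above, assuming it is appended to the bottom of that file; "GmailOpenAIAgentPack" is only the docstring's example class, and running this needs network access and will pip-install the pack's requirements.
if __name__ == "__main__":
    # Download the example pack and record the download, then report the class name.
    pack_cls = download_llama_module("GmailOpenAIAgentPack", refresh_cache=True)
    track_download("GmailOpenAIAgentPack", MODULE_TYPE.LLAMAPACK.value)
    print(f"Downloaded {pack_cls.__name__}")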
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Coroutine
from langchain.llms.base import BaseLLM
from nemoguardrails import LLMRails, RailsConfig
COLANG_CONFIG = """
define user express greeting
"hi"
define user express ill intent
"I hate you"
"I want to destroy the world"
define bot express cannot respond
"I'm sorry I cannot help you with that."
define user express question
"What is the current unemployment rate?"
# Basic guardrail example
define flow
user express ill intent
bot express cannot respond
# Question answering flow
define flow
user ...
$answer = execute llama_index_query(query=$last_user_message)
bot $answer
"""
YAML_CONFIG = """
models:
- type: main
engine: openai
model: text-davinci-003
"""
def demo():
try:
import llama_index
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.response.schema import StreamingResponse
except ImportError:
raise ImportError(
"Could not import llama_index, please install it with "
"`pip install llama_index`."
)
config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG)
app = LLMRails(config)
def _get_llama_index_query_engine(llm: BaseLLM):
docs = llama_index.SimpleDirectoryReader(
input_files=["../examples/grounding_rail/kb/report.md"]
).load_data()
llm_predictor = llama_index.LLMPredictor(llm=llm)
index = llama_index.GPTVectorStoreIndex.from_documents(
docs, llm_predictor=llm_predictor
)
default_query_engine = index.as_query_engine()
return default_query_engine
def _get_callable_query_engine(
query_engine: BaseQueryEngine,
) -> Callable[[str], Coroutine[Any, Any, str]]:
async def get_query_response(query: str) -> str:
response = query_engine.query(query)
if isinstance(response, StreamingResponse):
typed_response = response.get_response()
else:
typed_response = response
response_str = typed_response.response
if response_str is None:
return ""
return response_str
return get_query_response
query_engine = _get_llama_index_query_engine(app.llm)
app.register_action(
_get_callable_query_engine(query_engine), name="llama_index_query"
)
history = [{"role": "user", "content": "What is the current unemployment rate?"}]
result = app.generate(messages=history)
print(result)
if __name__ == "__main__":
demo()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.LLMPredictor"
] | [((1791, 1843), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1815, 1843), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1854, 1870), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n', (1862, 1870), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((2089, 2122), 'llama_index.LLMPredictor', 'llama_index.LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2113, 2122), False, 'import llama_index\n'), ((2139, 2225), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['docs'], {'llm_predictor': 'llm_predictor'}), '(docs, llm_predictor=\n llm_predictor)\n', (2185, 2225), False, 'import llama_index\n'), ((1940, 2035), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', ([], {'input_files': "['../examples/grounding_rail/kb/report.md']"}), "(input_files=[\n '../examples/grounding_rail/kb/report.md'])\n", (1973, 2035), False, 'import llama_index\n')] |
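A hedged extension of the demo above, assuming it is appended to the same script: the ill-intent flow in COLANG_CONFIG should short-circuit to the canned refusal, so no llama_index action needs to be registered for this path.
def demo_blocked_message():
    # Build the rails from the same configs defined above.
    config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG)
    app = LLMRails(config)
    history = [{"role": "user", "content": "I want to destroy the world"}]
    # Expected (per the guardrail flow): "I'm sorry I cannot help you with that."
    print(app.generate(messages=history))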
import logging
from dataclasses import dataclass
from typing import Optional, Union
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.embeddings import (
DEFAULT_HUGGINGFACE_EMBEDDING_MODEL,
LangchainEmbedding,
)
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Union[BaseEmbedding, str]] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if isinstance(embed_model, str):
splits = embed_model.split(":", 1)
is_local = splits[0]
model_name = splits[1] if len(splits) > 1 else None
if is_local != "local":
raise ValueError(
"embed_model must start with str 'local' or of type BaseEmbedding"
)
try:
from langchain.embeddings import HuggingFaceEmbeddings
except ImportError as exc:
raise ImportError(
"Could not import sentence_transformers or langchain package. "
"Please install with `pip install sentence-transformers langchain`."
) from exc
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(
model_name=model_name or DEFAULT_HUGGINGFACE_EMBEDDING_MODEL
)
)
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLM] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or service_context.embed_model
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
| [
"llama_index.llm_predictor.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.embeddings.openai.OpenAIEmbedding",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults"
] | [((809, 836), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (826, 836), False, 'import logging\n'), ((1067, 1189), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1097, 1189), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1592, 1649), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1622, 1649), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5761, 5780), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5776, 5780), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((5957, 5978), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (5969, 5978), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6020, 6034), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (6032, 6034), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6189, 6206), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (6204, 6206), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((6707, 6720), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (6718, 6720), False, 'from llama_index.logger import LlamaLogger\n'), ((8412, 8433), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8424, 8433), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((5050, 5137), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(model_name or DEFAULT_HUGGINGFACE_EMBEDDING_MODEL)'}), '(model_name=model_name or\n DEFAULT_HUGGINGFACE_EMBEDDING_MODEL)\n', (5071, 5137), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n')] |
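A hedged usage sketch for the module above, assuming the legacy (pre-0.10) llama_index package layout and an OPENAI_API_KEY in the environment; the model name and chunk sizes are illustrative.
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms import OpenAI
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0.0),
    chunk_size=512,
    chunk_overlap=64,
)
set_global_service_context(service_context)  # becomes the default for new indices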
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response.schema import Response
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way they are marked as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.response.schema.Response",
"llama_index.indices.query.schema.QueryBundle",
"llama_index.schema.TextNode",
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service"
] | [((1046, 1073), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1063, 1073), False, 'import logging\n'), ((2734, 2767), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2765, 2767), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4819, 4868), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4823, 4868), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6762, 6788), 'llama_index.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6770, 6788), False, 'from llama_index.response.schema import Response\n'), ((6845, 6873), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6856, 6873), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8284, 8306), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8292, 8306), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
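A hedged usage sketch based on the class's own docstrings, assuming it runs in the same module as the code above; it needs Google Generative Language credentials and the google-generativeai package, and the passage text is made up.
synth = GoogleTextSynthesizer.from_defaults(temperature=0.7, answer_style=1)
result = synth.get_response(
    query_str="When was the essay written?",
    text_chunks=["Paul Graham published the essay in 2021."],  # illustrative passage
)
print(result.answer)
print(result.attributed_passages)
print(result.answerable_probability)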
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-rag-fusion-query-pipeline')
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt' -O pg_essay.txt")
from llama_index.core import SimpleDirectoryReader
reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
docs = reader.load_data()
import phoenix as px
px.launch_app()
import llama_index.core
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.llama_pack import download_llama_pack
from llama_index.packs.rag_fusion_query_pipeline import RAGFusionPipelinePack
from llama_index.llms.openai import OpenAI
pack = RAGFusionPipelinePack(docs, llm=OpenAI(model="gpt-3.5-turbo"))
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(pack.query_pipeline.dag)
net.show("rag_dag.html")
response = pack.run(query="What did the author do growing up?")
print(str(response)) | [
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((432, 483), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['pg_essay.txt']"}), "(input_files=['pg_essay.txt'])\n", (453, 483), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((535, 550), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (548, 550), True, 'import phoenix as px\n'), ((933, 995), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (940, 995), False, 'from pyvis.network import Network\n'), ((858, 887), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (864, 887), False, 'from llama_index.llms.openai import OpenAI\n')] |
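A hedged follow-up to the notebook cells above: BaseLlamaPack subclasses expose their internals through get_modules(), though the exact keys returned by this pack are not guaranteed.
for name, module in pack.get_modules().items():
    # Print each component the pack wires into its query pipeline.
    print(name, type(module).__name__)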
import uvicorn
import os
import logging
import llama_index
from typing import cast
from pathlib import Path
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI
from dotenv import load_dotenv
from contextlib import asynccontextmanager
from firebase_admin import credentials, initialize_app
from app.db.pg_vector import CustomPGVectorStore, get_vector_store_singleton
from app.db.wait_for_db import check_database_connection
from app.api.api import api_router
from app.setup.service_context import initialize_llamaindex_service_context
from app.setup.tracing import initialize_tracing_service
load_dotenv()
cwd = Path.cwd()
# Default to 'development' if not set
environment = os.getenv("ENVIRONMENT", "dev")
@asynccontextmanager
async def lifespan(app: FastAPI):
# First wait for DB to be connectable.
await check_database_connection()
# Initialize pg vector store singleton.
vector_store = await get_vector_store_singleton()
vector_store = cast(CustomPGVectorStore, vector_store)
await vector_store.run_setup()
# Initialize firebase admin for authentication.
cred = credentials.Certificate(cwd / 'firebase_creds.json')
initialize_app(cred)
# if environment == "dev":
# # Initialize observability service.
# initialize_tracing_service("wandb", "talking-resume")
# Set global ServiceContext for LlamaIndex.
initialize_llamaindex_service_context(environment)
yield
# This section is run on app shutdown.
await vector_store.close()
app = FastAPI(lifespan=lifespan)
if environment == "dev":
# LLM debug.
llama_index.set_global_handler("simple")
logger = logging.getLogger("uvicorn")
logger.warning(
"Running in development mode - allowing CORS for all origins")
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(api_router, prefix="/api")
if __name__ == "__main__":
uvicorn.run(app="main:app", host="0.0.0.0", reload=True)
| [
"llama_index.set_global_handler"
] | [((620, 633), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (631, 633), False, 'from dotenv import load_dotenv\n'), ((641, 651), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (649, 651), False, 'from pathlib import Path\n'), ((705, 736), 'os.getenv', 'os.getenv', (['"""ENVIRONMENT"""', '"""dev"""'], {}), "('ENVIRONMENT', 'dev')\n", (714, 736), False, 'import os\n'), ((1549, 1575), 'fastapi.FastAPI', 'FastAPI', ([], {'lifespan': 'lifespan'}), '(lifespan=lifespan)\n', (1556, 1575), False, 'from fastapi import FastAPI\n'), ((993, 1032), 'typing.cast', 'cast', (['CustomPGVectorStore', 'vector_store'], {}), '(CustomPGVectorStore, vector_store)\n', (997, 1032), False, 'from typing import cast\n'), ((1132, 1184), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (["(cwd / 'firebase_creds.json')"], {}), "(cwd / 'firebase_creds.json')\n", (1155, 1184), False, 'from firebase_admin import credentials, initialize_app\n'), ((1189, 1209), 'firebase_admin.initialize_app', 'initialize_app', (['cred'], {}), '(cred)\n', (1203, 1209), False, 'from firebase_admin import credentials, initialize_app\n'), ((1405, 1455), 'app.setup.service_context.initialize_llamaindex_service_context', 'initialize_llamaindex_service_context', (['environment'], {}), '(environment)\n', (1442, 1455), False, 'from app.setup.service_context import initialize_llamaindex_service_context\n'), ((1623, 1663), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (1653, 1663), False, 'import llama_index\n'), ((1678, 1706), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (1695, 1706), False, 'import logging\n'), ((2051, 2107), 'uvicorn.run', 'uvicorn.run', ([], {'app': '"""main:app"""', 'host': '"""0.0.0.0"""', 'reload': '(True)'}), "(app='main:app', host='0.0.0.0', reload=True)\n", (2062, 2107), False, 'import uvicorn\n'), ((847, 874), 'app.db.wait_for_db.check_database_connection', 'check_database_connection', ([], {}), '()\n', (872, 874), False, 'from app.db.wait_for_db import check_database_connection\n'), ((945, 973), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (971, 973), False, 'from app.db.pg_vector import CustomPGVectorStore, get_vector_store_singleton\n')] |
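A hedged smoke-test sketch for the app above: FastAPI's TestClient drives the lifespan hooks when used as a context manager, so it needs the same Postgres database and firebase_creds.json; the probed route is hypothetical.
from fastapi.testclient import TestClient
def smoke_test():
    with TestClient(app) as client:
        # "/api/healthcheck" is a hypothetical path; real routes live in app.api.api.
        print(client.get("/api/healthcheck").status_code)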
#!/usr/bin/env python
import os, sys
print("[INFO] Python", sys.version)
if "VIRTUAL_ENV" in os.environ:
print("[INFO] venv:", os.environ["VIRTUAL_ENV"])
if sys.version_info.major != 3 or sys.version_info.minor not in (8,9,10,11):
print("[WARNING] Unsupported python version!")
print("[INFO] Testing imports...")
try:
import llama_index, jupyterlab, loguru
except ImportError:
print("[ERROR] /!\ Could not import some requirements, make sure you've installed everything " \
"according to README.md")
print("[INFO] python path set to:", sys.path)
raise
print("[INFO] OK. Loading model...")
service_context = llama_index.ServiceContext.from_defaults(
embed_model="local:sentence-transformers/all-minilm-l6-v2", chunk_size=256, llm=None
)
print("[INFO] OK. Testing model...")
service_context.embed_model.get_text_embedding('Sphinx of black quartz, judge my vow')
print("All OK!")
| [
"llama_index.ServiceContext.from_defaults"
] | [((644, 775), 'llama_index.ServiceContext.from_defaults', 'llama_index.ServiceContext.from_defaults', ([], {'embed_model': '"""local:sentence-transformers/all-minilm-l6-v2"""', 'chunk_size': '(256)', 'llm': 'None'}), "(embed_model=\n 'local:sentence-transformers/all-minilm-l6-v2', chunk_size=256, llm=None)\n", (684, 775), False, 'import llama_index, jupyterlab, loguru\n')] |
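A hedged follow-up check that could be appended to the script above: all-MiniLM-L6-v2 produces 384-dimensional vectors, so the embedding length is a cheap extra sanity check.
embedding = service_context.embed_model.get_text_embedding("Sphinx of black quartz, judge my vow")
print("[INFO] Embedding dimension:", len(embedding))  # expected 384 for all-MiniLM-L6-v2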
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
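# The corpus and the documents ingested above persist server-side, so the code
# below can reattach to it purely by corpus ID without re-inserting anything.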
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
query_engine = RetrieverQueryEngine.from_args(
retriever=VectorIndexRetriever(
index=index,
similarity_top_k=20,
),
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform.base import (
StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
step_decompose_transform = StepDecomposeQueryTransform(
llm=gemini,
verbose=True,
)
query_engine = MultiStepQueryEngine(
query_engine=single_step_query_engine,
query_transform=step_decompose_transform,
response_synthesizer=response_synthesizer,
index_summary="Ask me anything.",
num_steps=6,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
base_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
hyde = HyDEQueryTransform(
llm=gemini,
include_original=False,
)
hyde_query_engine = TransformQueryEngine(base_query_engine, hyde)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=20,
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
hyde = HyDEQueryTransform(
llm=gemini,
include_original=False,
)
hyde_query_engine = TransformQueryEngine(single_step_query_engine, hyde)
step_decompose_transform = StepDecomposeQueryTransform(
llm=gemini, verbose=True
)
query_engine = MultiStepQueryEngine(
query_engine=hyde_query_engine,
query_transform=step_decompose_transform,
response_synthesizer=response_synthesizer,
index_summary="Ask me anything.",
num_steps=6,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
cleanup_colab_corpora()
project_name = "TODO-your-project-name" # @param {type:"string"}
email = "[email protected]" # @param {type:"string"}
client_file_name = "client_secret.json"
get_ipython().system('gcloud config set project $project_name')
get_ipython().system('gcloud config set account $email')
get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"') | [
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.query_engine.MultiStepQueryEngine",
"llama_index.core.retrievers.VectorIndexRetriever",
"llama_index.indices.managed.google.GoogleIndex.create_corpus",
"llama_index.core.postprocessor.LLMRerank",
"llama_index.vector_stores.google.set_google_config",
"llama_index.indices.managed.google.GoogleIndex.from_corpus",
"llama_index.core.indices.query.query_transform.HyDEQueryTransform",
"llama_index.core.vector_stores.google.generativeai.genai_extension.build_semantic_retriever",
"llama_index.core.vector_stores.google.generativeai.genai_extension.list_corpora",
"llama_index.core.schema.RelatedNodeInfo",
"llama_index.response_synthesizers.google.GoogleTextSynthesizer.from_defaults",
"llama_index.core.vector_stores.google.generativeai.genai_extension.delete_corpus",
"llama_index.core.query_engine.TransformQueryEngine",
"llama_index.core.indices.query.query_transform.base.StepDecomposeQueryTransform",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.gemini.Gemini",
"llama_index.vector_stores.google.GoogleVectorStore.from_corpus"
] | [((665, 830), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['"""service_account_key.json"""'], {'scopes': "['https://www.googleapis.com/auth/generative-language.retriever']"}), "(\n 'service_account_key.json', scopes=[\n 'https://www.googleapis.com/auth/generative-language.retriever'])\n", (718, 830), False, 'from google.oauth2 import service_account\n'), ((847, 894), 'llama_index.vector_stores.google.set_google_config', 'set_google_config', ([], {'auth_credentials': 'credentials'}), '(auth_credentials=credentials)\n', (864, 894), False, 'from llama_index.vector_stores.google import set_google_config\n'), ((2390, 2482), 'llama_index.indices.managed.google.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID', 'display_name': '"""My first corpus!"""'}), "(corpus_id=SESSION_CORPUS_ID, display_name=\n 'My first corpus!')\n", (2415, 2482), False, 'from llama_index.indices.managed.google import GoogleIndex\n'), ((3123, 3175), 'llama_index.indices.managed.google.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (3146, 3175), False, 'from llama_index.indices.managed.google import GoogleIndex\n'), ((3460, 3512), 'llama_index.indices.managed.google.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (3483, 3512), False, 'from llama_index.indices.managed.google import GoogleIndex\n'), ((4556, 4608), 'llama_index.indices.managed.google.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (4579, 4608), False, 'from llama_index.indices.managed.google import GoogleIndex\n'), ((5714, 5744), 'llama_index.llms.gemini.Gemini', 'Gemini', ([], {'api_key': 'GEMINI_API_KEY'}), '(api_key=GEMINI_API_KEY)\n', (5720, 5744), False, 'from llama_index.llms.gemini import Gemini\n'), ((6118, 6176), 'llama_index.vector_stores.google.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (6147, 6176), False, 'from llama_index.vector_stores.google import GoogleVectorStore\n'), ((6185, 6239), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'store'}), '(vector_store=store)\n', (6219, 6239), False, 'from llama_index.core import VectorStoreIndex\n'), ((6270, 6387), 'llama_index.response_synthesizers.google.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {'temperature': '(0.2)', 'answer_style': 'GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE'}), '(temperature=0.2, answer_style=\n GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE)\n', (6305, 6387), False, 'from llama_index.response_synthesizers.google import GoogleTextSynthesizer\n'), ((6405, 6436), 'llama_index.core.postprocessor.LLMRerank', 'LLMRerank', ([], {'top_n': '(10)', 'llm': 'gemini'}), '(top_n=10, llm=gemini)\n', (6414, 6436), False, 'from llama_index.core.postprocessor import LLMRerank\n'), ((6937, 6995), 'llama_index.vector_stores.google.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (6966, 6995), False, 'from llama_index.vector_stores.google import GoogleVectorStore\n'), ((7004, 7058), 
'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'store'}), '(vector_store=store)\n', (7038, 7058), False, 'from llama_index.core import VectorStoreIndex\n'), ((7089, 7206), 'llama_index.response_synthesizers.google.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {'temperature': '(0.2)', 'answer_style': 'GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE'}), '(temperature=0.2, answer_style=\n GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE)\n', (7124, 7206), False, 'from llama_index.response_synthesizers.google import GoogleTextSynthesizer\n'), ((7364, 7417), 'llama_index.core.indices.query.query_transform.base.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', ([], {'llm': 'gemini', 'verbose': '(True)'}), '(llm=gemini, verbose=True)\n', (7391, 7417), False, 'from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n'), ((7444, 7644), 'llama_index.core.query_engine.MultiStepQueryEngine', 'MultiStepQueryEngine', ([], {'query_engine': 'single_step_query_engine', 'query_transform': 'step_decompose_transform', 'response_synthesizer': 'response_synthesizer', 'index_summary': '"""Ask me anything."""', 'num_steps': '(6)'}), "(query_engine=single_step_query_engine, query_transform\n =step_decompose_transform, response_synthesizer=response_synthesizer,\n index_summary='Ask me anything.', num_steps=6)\n", (7464, 7644), False, 'from llama_index.core.query_engine import MultiStepQueryEngine\n'), ((7900, 7958), 'llama_index.vector_stores.google.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (7929, 7958), False, 'from llama_index.vector_stores.google import GoogleVectorStore\n'), ((7967, 8021), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'store'}), '(vector_store=store)\n', (8001, 8021), False, 'from llama_index.core import VectorStoreIndex\n'), ((8052, 8169), 'llama_index.response_synthesizers.google.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {'temperature': '(0.2)', 'answer_style': 'GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE'}), '(temperature=0.2, answer_style=\n GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE)\n', (8087, 8169), False, 'from llama_index.response_synthesizers.google import GoogleTextSynthesizer\n'), ((8300, 8354), 'llama_index.core.indices.query.query_transform.HyDEQueryTransform', 'HyDEQueryTransform', ([], {'llm': 'gemini', 'include_original': '(False)'}), '(llm=gemini, include_original=False)\n', (8318, 8354), False, 'from llama_index.core.indices.query.query_transform import HyDEQueryTransform\n'), ((8386, 8431), 'llama_index.core.query_engine.TransformQueryEngine', 'TransformQueryEngine', (['base_query_engine', 'hyde'], {}), '(base_query_engine, hyde)\n', (8406, 8431), False, 'from llama_index.core.query_engine import TransformQueryEngine\n'), ((8531, 8589), 'llama_index.vector_stores.google.GoogleVectorStore.from_corpus', 'GoogleVectorStore.from_corpus', ([], {'corpus_id': 'SESSION_CORPUS_ID'}), '(corpus_id=SESSION_CORPUS_ID)\n', (8560, 8589), False, 'from llama_index.vector_stores.google import GoogleVectorStore\n'), ((8598, 8652), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'store'}), '(vector_store=store)\n', (8632, 8652), False, 'from llama_index.core import 
VectorStoreIndex\n'), ((8683, 8800), 'llama_index.response_synthesizers.google.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {'temperature': '(0.2)', 'answer_style': 'GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE'}), '(temperature=0.2, answer_style=\n GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE)\n', (8718, 8800), False, 'from llama_index.response_synthesizers.google import GoogleTextSynthesizer\n'), ((8814, 8845), 'llama_index.core.postprocessor.LLMRerank', 'LLMRerank', ([], {'top_n': '(10)', 'llm': 'gemini'}), '(top_n=10, llm=gemini)\n', (8823, 8845), False, 'from llama_index.core.postprocessor import LLMRerank\n'), ((9025, 9079), 'llama_index.core.indices.query.query_transform.HyDEQueryTransform', 'HyDEQueryTransform', ([], {'llm': 'gemini', 'include_original': '(False)'}), '(llm=gemini, include_original=False)\n', (9043, 9079), False, 'from llama_index.core.indices.query.query_transform import HyDEQueryTransform\n'), ((9111, 9163), 'llama_index.core.query_engine.TransformQueryEngine', 'TransformQueryEngine', (['single_step_query_engine', 'hyde'], {}), '(single_step_query_engine, hyde)\n', (9131, 9163), False, 'from llama_index.core.query_engine import TransformQueryEngine\n'), ((9192, 9245), 'llama_index.core.indices.query.query_transform.base.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', ([], {'llm': 'gemini', 'verbose': '(True)'}), '(llm=gemini, verbose=True)\n', (9219, 9245), False, 'from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n'), ((9267, 9460), 'llama_index.core.query_engine.MultiStepQueryEngine', 'MultiStepQueryEngine', ([], {'query_engine': 'hyde_query_engine', 'query_transform': 'step_decompose_transform', 'response_synthesizer': 'response_synthesizer', 'index_summary': '"""Ask me anything."""', 'num_steps': '(6)'}), "(query_engine=hyde_query_engine, query_transform=\n step_decompose_transform, response_synthesizer=response_synthesizer,\n index_summary='Ask me anything.', num_steps=6)\n", (9287, 9460), False, 'from llama_index.core.query_engine import MultiStepQueryEngine\n'), ((1624, 1657), 'llama_index.core.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (1655, 1657), True, 'import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1769, 1802), 'llama_index.core.vector_stores.google.generativeai.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (1800, 1802), True, 'import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1807, 1863), 'llama_index.core.vector_stores.google.generativeai.genai_extension.delete_corpus', 'genaix.delete_corpus', ([], {'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (1827, 1863), True, 'import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1417, 1435), 'random.randrange', 'randrange', (['(1000000)'], {}), '(1000000)\n', (1426, 1435), False, 'from random import randrange\n'), ((1673, 1707), 'llama_index.core.vector_stores.google.generativeai.genai_extension.list_corpora', 'genaix.list_corpora', ([], {'client': 'client'}), '(client=client)\n', (1692, 1707), True, 'import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix\n'), ((2553, 2597), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), 
"('./data/paul_graham/')\n", (2574, 2597), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((6509, 6563), 'llama_index.core.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(20)'}), '(index=index, similarity_top_k=20)\n', (6529, 6563), False, 'from llama_index.core.retrievers import VectorIndexRetriever\n'), ((4761, 4897), 'google.ai.generativelanguage.SafetySetting', 'SafetySetting', ([], {'category': 'HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,\n threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (4774, 4897), False, 'from google.ai.generativelanguage import GenerateAnswerRequest, HarmCategory, SafetySetting\n'), ((4938, 5062), 'google.ai.generativelanguage.SafetySetting', 'SafetySetting', ([], {'category': 'HarmCategory.HARM_CATEGORY_VIOLENCE', 'threshold': 'SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH'}), '(category=HarmCategory.HARM_CATEGORY_VIOLENCE, threshold=\n SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH)\n', (4951, 5062), False, 'from google.ai.generativelanguage import GenerateAnswerRequest, HarmCategory, SafetySetting\n'), ((3672, 3748), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""123"""', 'metadata': "{'file_name': 'Tale of Two Cities'}"}), "(node_id='123', metadata={'file_name': 'Tale of Two Cities'})\n", (3687, 3748), False, 'from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((3968, 4044), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""123"""', 'metadata': "{'file_name': 'Tale of Two Cities'}"}), "(node_id='123', metadata={'file_name': 'Tale of Two Cities'})\n", (3983, 4044), False, 'from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode\n'), ((4261, 4339), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '"""456"""', 'metadata': "{'file_name': 'Bugs Bunny Adventure'}"}), "(node_id='456', metadata={'file_name': 'Bugs Bunny Adventure'})\n", (4276, 4339), False, 'from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
import logging
from dataclasses import dataclass
from typing import List, Optional
import llama_index
from llama_index.bridge.pydantic import BaseModel
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.llm_predictor import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.llms.base import LLM
from llama_index.llms.utils import LLMType, resolve_llm
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser, TextSplitter
from llama_index.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.prompts.base import BasePromptTemplate
from llama_index.schema import TransformComponent
from llama_index.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[EmbedType] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
if not isinstance(self.llm_predictor, LLMPredictor):
raise ValueError("llm_predictor must be an instance of LLMPredictor")
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.embeddings.loading import load_embed_model
from llama_index.extractors.loading import load_extractor
from llama_index.llm_predictor.loading import load_predictor
from llama_index.node_parser.loading import load_parser
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
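# Illustrative usage sketch (not part of the upstream module): build a
# ServiceContext with explicit defaults and install it globally.  The embedding
# model name and chunk_size are arbitrary example values, and llm=None defers to
# resolve_llm(), which substitutes a mock LLM when no model is given.
if __name__ == "__main__":
    example_context = ServiceContext.from_defaults(
        embed_model="local:sentence-transformers/all-MiniLM-L6-v2",
        chunk_size=512,
        llm=None,
    )
    set_global_service_context(example_context)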
| [
"llama_index.llms.utils.resolve_llm",
"llama_index.node_parser.loading.load_parser",
"llama_index.extractors.loading.load_extractor",
"llama_index.embeddings.loading.load_embed_model",
"llama_index.llm_predictor.LLMPredictor",
"llama_index.logger.LlamaLogger",
"llama_index.embeddings.utils.resolve_embed_model",
"llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.callbacks.base.CallbackManager",
"llama_index.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.llm_predictor.loading.load_predictor"
] | [((1019, 1046), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1036, 1046), False, 'import logging\n'), ((1821, 1878), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1851, 1878), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7115, 7147), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7134, 7147), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10676, 10708), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (10695, 10708), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((13993, 14043), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14007, 14043), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14067, 14117), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14083, 14117), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14143, 14201), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14165, 14201), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6249, 6268), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6264, 6268), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6436, 6452), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6447, 6452), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6494, 6560), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6506, 6560), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8023, 8036), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8034, 8036), False, 'from llama_index.logger import LlamaLogger\n'), ((9971, 9987), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (9982, 9987), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10016, 10037), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10028, 10037), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1420, 1437), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1435, 1437), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14377, 14399), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14388, 14399), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14471, 14496), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14485, 14496), False, 'from llama_index.extractors.loading import load_extractor\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
from deprecated import deprecated
import llama_index.core
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.service_context_elements.llm_predictor import (
LLMPredictor,
BaseLLMPredictor,
)
from llama_index.core.llms.base import LLMMetadata
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.service_context_elements.llama_logger import LlamaLogger
from llama_index.core.node_parser.interface import NodeParser, TextSplitter
from llama_index.core.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
@deprecated(
version="0.10.0",
reason="ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.",
)
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
        You can change the base defaults by setting llama_index.core.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.core.global_service_context is not None:
return cls.from_service_context(
llama_index.core.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size",
DeprecationWarning,
)
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
return ServiceContextData(
llm=llm_dict,
llm_predictor=llm_predictor_dict,
prompt_helper=prompt_helper_dict,
embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.core.embeddings.loading import load_embed_model
from llama_index.core.extractors.loading import load_extractor
from llama_index.core.node_parser.loading import load_parser
from llama_index.core.service_context_elements.llm_predictor import (
load_predictor,
)
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.core.global_service_context = service_context
if service_context is not None:
from llama_index.core.settings import Settings
Settings.llm = service_context.llm
Settings.embed_model = service_context.embed_model
Settings.prompt_helper = service_context.prompt_helper
Settings.transformations = service_context.transformations
Settings.node_parser = service_context.node_parser
Settings.callback_manager = service_context.callback_manager
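# Illustrative usage sketch (not part of the upstream module): the deprecated
# ServiceContext path still works and is mirrored onto the global Settings
# object by set_global_service_context() above.  MockLLM and MockEmbedding are
# assumed to be the stock mock implementations shipped with llama-index-core;
# chunk_size is an arbitrary example value.
if __name__ == "__main__":
    from llama_index.core.embeddings import MockEmbedding
    from llama_index.core.llms import MockLLM

    ctx = ServiceContext.from_defaults(
        llm=MockLLM(),
        embed_model=MockEmbedding(embed_dim=8),
        chunk_size=256,
    )
    # Also populates Settings.llm, Settings.embed_model, Settings.node_parser, ...
    set_global_service_context(ctx)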
| [
"llama_index.core.llms.utils.resolve_llm",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.embeddings.loading.load_embed_model",
"llama_index.core.service_context_elements.llm_predictor.LLMPredictor",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.node_parser.loading.load_parser",
"llama_index.core.extractors.loading.load_extractor",
"llama_index.core.service_context_elements.llama_logger.LlamaLogger",
"llama_index.core.service_context_elements.llm_predictor.load_predictor"
] | [((1132, 1159), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1149, 1159), False, 'import logging\n'), ((1934, 1991), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1964, 1991), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((2811, 2941), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.10.0"""', 'reason': '"""ServiceContext is deprecated, please use `llama_index.settings.Settings` instead."""'}), "(version='0.10.0', reason=\n 'ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.'\n )\n", (2821, 2941), False, 'from deprecated import deprecated\n'), ((5452, 5480), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5456, 5480), False, 'from typing import Any, List, Optional, cast\n'), ((7909, 7941), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7928, 7941), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10358, 10386), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10362, 10386), False, 'from typing import Any, List, Optional, cast\n'), ((11602, 11634), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11621, 11634), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14838, 14888), 'llama_index.core.service_context_elements.llm_predictor.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14852, 14888), False, 'from llama_index.core.service_context_elements.llm_predictor import load_predictor\n'), ((14912, 14962), 'llama_index.core.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14928, 14962), False, 'from llama_index.core.embeddings.loading import load_embed_model\n'), ((14988, 15046), 'llama_index.core.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (15010, 15046), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((6653, 6672), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6668, 6672), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((6840, 6856), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6851, 6856), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((7288, 7354), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7300, 7354), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((8817, 8830), 'llama_index.core.service_context_elements.llama_logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8828, 8830), False, 'from llama_index.core.service_context_elements.llama_logger import LlamaLogger\n'), ((10897, 10913), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', 
(['llm'], {}), '(llm)\n', (10908, 10913), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((10942, 10963), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10954, 10963), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((1533, 1550), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1548, 1550), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((15222, 15244), 'llama_index.core.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15233, 15244), False, 'from llama_index.core.node_parser.loading import load_parser\n'), ((15316, 15341), 'llama_index.core.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15330, 15341), False, 'from llama_index.core.extractors.loading import load_extractor\n')] |
import itertools
import logging
from os import path
from typing import List, Sequence
import llama_index.vector_stores
import ray
from kfp import compiler, dsl
from langchain.embeddings.fake import FakeEmbeddings
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.data_structs import IndexDict
from llama_index.llms import MockLLM
import vectorize_fileset
logging.basicConfig(level=logging.INFO)
## The default number of concurrent ray tasks
DEFAULT_CPU_CONCURRENCY = 150
DEFAULT_GPU_CONCURRENCY = 10
## The largest number of tasks we'll wait for at a time
READY_BATCH_SIZE = 1
def get_fs(url: str):
import fsspec
return fsspec.filesystem(url.split("://", 1)[0])
def url_as_path(url: str) -> str:
"""Converts a URL to a path."""
return url.split("://", 1)[-1]
def persist_nodes(nodes: List, vectordb_cls: str, vectordb_kwargs: dict):
if vectordb_cls is None:
logging.warn("Unable to persist nodes, there is no vector store specified")
return
if len(nodes) == 0:
return
cls = getattr(llama_index.vector_stores, vectordb_cls)
vectordb_kwargs["dim"] = len(nodes[0].embedding)
vector_store = cls(**vectordb_kwargs)
service_context = ServiceContext.from_defaults(
llm=MockLLM(), embed_model=FakeEmbeddings(size=len(nodes[0].embedding))
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
vector_store_index = VectorStoreIndex(
storage_context=storage_context,
index_struct=IndexDict(),
service_context=service_context,
)
logging.info(f"Persisting {len(nodes)} nodes to vector store")
vector_store_index.insert_nodes(nodes)
def partition(lst: Sequence, size: int):
for i in range(0, len(lst), size):
yield lst[i : i + size]
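## e.g. list(partition([1, 2, 3, 4, 5], size=2)) -> [[1, 2], [3, 4], [5]]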
def ray_vectorize_dataset(
ray_address: str,
root_uri: str,
files: List[str],
batch_size=1000,
vectordb_cls: str = None,
vectordb_kwargs: dict = None,
concurrency: int = None,
use_gpu: bool = False,
):
runtime_env = {
"working_dir": ".",
"py_modules": [vectorize_fileset],
"conda": {
"dependencies": [
"pip",
{
"pip": [
"gcsfs~=2023.9",
"s3fs~=2023.9",
"fsspec~=2023.9",
"llama_index~=0.8.29",
"langchain~=0.0.298",
"sentence-transformers~=2.2",
"nltk",
]
},
],
},
}
ray.init(address=ray_address, runtime_env=runtime_env)
num_cpus = 2 if not use_gpu else 1
num_gpus = 1 if use_gpu else 0
## Make remote versions of the functions we'll need
remote_vectorize_fileset = ray.remote(vectorize_fileset.vectorize_fileset)
remote_vectorize_fileset = remote_vectorize_fileset.options(
num_cpus=num_cpus, num_gpus=num_gpus
)
if concurrency is None:
concurrency = DEFAULT_GPU_CONCURRENCY if use_gpu else DEFAULT_CPU_CONCURRENCY
## Partition the file lists into batches and submit them to ray
result_refs = []
for p in partition(files, size=batch_size):
results = None
if len(result_refs) >= concurrency:
ready_refs, result_refs = ray.wait(
result_refs, num_returns=min(READY_BATCH_SIZE, len(result_refs))
)
results = ray.get(ready_refs)
result_refs.append(remote_vectorize_fileset.remote(root_uri, p))
if results:
persist_nodes(
list(itertools.chain(*results)),
vectordb_cls=vectordb_cls,
vectordb_kwargs=vectordb_kwargs,
)
while result_refs:
ready_refs, result_refs = ray.wait(
result_refs, num_returns=min(READY_BATCH_SIZE, len(result_refs))
)
results = ray.get(ready_refs)
persist_nodes(
list(itertools.chain(*results)),
vectordb_cls=vectordb_cls,
vectordb_kwargs=vectordb_kwargs,
)
@dsl.component(
target_image="us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest",
base_image="python:3.10-slim",
packages_to_install=[
"ray[client]~=2.7",
"gcsfs~=2023.9",
"s3fs~=2023.9",
"fsspec~=2023.9",
"llama_index~=0.8.29",
"pymilvus~=2.3",
],
)
def vectorize_dataset(
dataset_url: str,
vectordb_cls: str,
vectordb_kwargs: dict,
ray_address: str,
batch_size: int,
concurrency: int,
use_gpu: bool,
):
"""
    Vectorizes each file in a dataset and persists the resulting vectors to a datastore.
If `ray_address` is provided, then the component will use ray tasks to vectorize batches
of the files in parallel. Otherwise, it will vectorize the files sequentially.
Args:
dataset_url: The URL of the dataset to vectorize. This should be a directory of separate documents.
All files in the directory and any subdirectory will be vectorized. The URL should be in the form
of a supported fsspec URL (e.g. `gs://` for Google Cloud Storage, `s3://` for S3, etc.)
vectordb_cls: The class of the vector store to persist the vectors to. This should be a class from
`llama_index.vector_stores`. If `None`, then the vectors will not be persisted.
vectordb_kwargs: The keyword arguments to pass to the vector store class constructor.
ray_address: The address of the ray cluster to use for parallelization. If `None`, then the files
will be vectorized sequentially.
batch_size: The number of files to vectorize in each batch. This is only used if `ray_address` is
provided.
concurrency: The maximum number of concurrent ray tasks to run. This is only used if `ray_address`
            is provided.
        use_gpu: Whether to request a GPU for each ray vectorization task. This is only used if
            `ray_address` is provided.
    """
fs = get_fs(dataset_url)
dataset_path = url_as_path(dataset_url)
dataset_path = dataset_path.rstrip("/") + "/" ## Ensure the path ends with a slash
all_files = list(
itertools.chain(
*[
[path.join(dirpath.replace(dataset_path, ""), f) for f in files]
for dirpath, _, files in fs.walk(dataset_path)
]
)
)
if ray_address is not None:
ray_vectorize_dataset(
ray_address,
dataset_url,
all_files,
vectordb_cls=vectordb_cls,
vectordb_kwargs=vectordb_kwargs,
batch_size=batch_size,
concurrency=concurrency,
use_gpu=use_gpu,
)
else:
        nodes = vectorize_fileset.vectorize_fileset(dataset_url, all_files)
persist_nodes(nodes, vectordb_cls=vectordb_cls, vectordb_kwargs=vectordb_kwargs)
if __name__ == "__main__":
compiler.Compiler().compile(
vectorize_dataset, path.join(path.dirname(__file__), "..", "component.yaml")
)
| [
"llama_index.data_structs.IndexDict",
"llama_index.StorageContext.from_defaults",
"llama_index.llms.MockLLM"
] | [((397, 436), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (416, 436), False, 'import logging\n'), ((4202, 4501), 'kfp.dsl.component', 'dsl.component', ([], {'target_image': '"""us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest"""', 'base_image': '"""python:3.10-slim"""', 'packages_to_install': "['ray[client]~=2.7', 'gcsfs~=2023.9', 's3fs~=2023.9', 'fsspec~=2023.9',\n 'llama_index~=0.8.29', 'pymilvus~=2.3']"}), "(target_image=\n 'us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest'\n , base_image='python:3.10-slim', packages_to_install=[\n 'ray[client]~=2.7', 'gcsfs~=2023.9', 's3fs~=2023.9', 'fsspec~=2023.9',\n 'llama_index~=0.8.29', 'pymilvus~=2.3'])\n", (4215, 4501), False, 'from kfp import compiler, dsl\n'), ((1410, 1465), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1438, 1465), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex\n'), ((2680, 2734), 'ray.init', 'ray.init', ([], {'address': 'ray_address', 'runtime_env': 'runtime_env'}), '(address=ray_address, runtime_env=runtime_env)\n', (2688, 2734), False, 'import ray\n'), ((2898, 2945), 'ray.remote', 'ray.remote', (['vectorize_fileset.vectorize_fileset'], {}), '(vectorize_fileset.vectorize_fileset)\n', (2908, 2945), False, 'import ray\n'), ((962, 1037), 'logging.warn', 'logging.warn', (['"""Unable to persist nodes, there is no vector store specified"""'], {}), "('Unable to persist nodes, there is no vector store specified')\n", (974, 1037), False, 'import logging\n'), ((4017, 4036), 'ray.get', 'ray.get', (['ready_refs'], {}), '(ready_refs)\n', (4024, 4036), False, 'import ray\n'), ((6788, 6829), 'vectorize_fileset', 'vectorize_fileset', (['dataset_url', 'all_files'], {}), '(dataset_url, all_files)\n', (6805, 6829), False, 'import vectorize_fileset\n'), ((1314, 1323), 'llama_index.llms.MockLLM', 'MockLLM', ([], {}), '()\n', (1321, 1323), False, 'from llama_index.llms import MockLLM\n'), ((1571, 1582), 'llama_index.data_structs.IndexDict', 'IndexDict', ([], {}), '()\n', (1580, 1582), False, 'from llama_index.data_structs import IndexDict\n'), ((3548, 3567), 'ray.get', 'ray.get', (['ready_refs'], {}), '(ready_refs)\n', (3555, 3567), False, 'import ray\n'), ((6952, 6971), 'kfp.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (6969, 6971), False, 'from kfp import compiler, dsl\n'), ((7018, 7040), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (7030, 7040), False, 'from os import path\n'), ((4077, 4102), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (4092, 4102), False, 'import itertools\n'), ((3710, 3735), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (3725, 3735), False, 'import itertools\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for configuring objects used to create OpenTelemetry traces."""
import os
from opentelemetry import trace, context
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry.propagate import set_global_textmap, get_global_textmap
from opentelemetry.propagators.composite import CompositePropagator
from tools.observability.llamaindex import opentelemetry_callback
import llama_index
from llama_index.callbacks.base import CallbackManager
from functools import wraps
# Configure tracer used by the Chain Server to create spans
resource = Resource.create({SERVICE_NAME: "chain-server"})
provider = TracerProvider(resource=resource)
if os.environ.get("ENABLE_TRACING") == "true":
processor = SimpleSpanProcessor(OTLPSpanExporter())
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
tracer = trace.get_tracer("chain-server")
# Configure Propagator used for processing trace context received by the Chain Server
if os.environ.get("ENABLE_TRACING") == "true":
propagator = TraceContextTextMapPropagator()
# Llamaindex global handler set to pass callbacks into the OpenTelemetry handler
llama_index.global_handler = opentelemetry_callback.OpenTelemetryCallbackHandler(tracer)
else:
propagator = CompositePropagator([]) # No-op propagator
set_global_textmap(propagator)
# Wrapper Function to perform instrumentation
def instrumentation_wrapper(func):
@wraps(func)
async def wrapper(*args, **kwargs):
request = kwargs.get("request")
prompt = kwargs.get("prompt")
ctx = get_global_textmap().extract(request.headers)
if ctx is not None:
context.attach(ctx)
if prompt is not None and prompt.use_knowledge_base == False:
# Hack to get the LLM event for no knowledge base queries to show up.
# A trace is not generated by Llamaindex for these calls so we need to generate it instead.
callback_manager = CallbackManager([])
with callback_manager.as_trace("query"):
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
return await result
return wrapper
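# Illustrative use of the wrapper (the handler name and signature are assumptions, not part of this module):
#
# @instrumentation_wrapper
# async def generate_answer(request, prompt):
#     ...
#
# The wrapper extracts any incoming W3C trace context from `request.headers`, attaches it,
# and only then awaits the wrapped coroutine.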
| [
"llama_index.callbacks.base.CallbackManager"
] | [((1536, 1583), 'opentelemetry.sdk.resources.Resource.create', 'Resource.create', (["{SERVICE_NAME: 'chain-server'}"], {}), "({SERVICE_NAME: 'chain-server'})\n", (1551, 1583), False, 'from opentelemetry.sdk.resources import SERVICE_NAME, Resource\n'), ((1595, 1628), 'opentelemetry.sdk.trace.TracerProvider', 'TracerProvider', ([], {'resource': 'resource'}), '(resource=resource)\n', (1609, 1628), False, 'from opentelemetry.sdk.trace import TracerProvider\n'), ((1775, 1810), 'opentelemetry.trace.set_tracer_provider', 'trace.set_tracer_provider', (['provider'], {}), '(provider)\n', (1800, 1810), False, 'from opentelemetry import trace, context\n'), ((1820, 1852), 'opentelemetry.trace.get_tracer', 'trace.get_tracer', (['"""chain-server"""'], {}), "('chain-server')\n", (1836, 1852), False, 'from opentelemetry import trace, context\n'), ((2280, 2310), 'opentelemetry.propagate.set_global_textmap', 'set_global_textmap', (['propagator'], {}), '(propagator)\n', (2298, 2310), False, 'from opentelemetry.propagate import set_global_textmap, get_global_textmap\n'), ((1632, 1664), 'os.environ.get', 'os.environ.get', (['"""ENABLE_TRACING"""'], {}), "('ENABLE_TRACING')\n", (1646, 1664), False, 'import os\n'), ((1943, 1975), 'os.environ.get', 'os.environ.get', (['"""ENABLE_TRACING"""'], {}), "('ENABLE_TRACING')\n", (1957, 1975), False, 'import os\n'), ((2004, 2035), 'opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator', 'TraceContextTextMapPropagator', ([], {}), '()\n', (2033, 2035), False, 'from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator\n'), ((2154, 2213), 'tools.observability.llamaindex.opentelemetry_callback.OpenTelemetryCallbackHandler', 'opentelemetry_callback.OpenTelemetryCallbackHandler', (['tracer'], {}), '(tracer)\n', (2205, 2213), False, 'from tools.observability.llamaindex import opentelemetry_callback\n'), ((2237, 2260), 'opentelemetry.propagators.composite.CompositePropagator', 'CompositePropagator', (['[]'], {}), '([])\n', (2256, 2260), False, 'from opentelemetry.propagators.composite import CompositePropagator\n'), ((2398, 2409), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2403, 2409), False, 'from functools import wraps\n'), ((1712, 1730), 'opentelemetry.exporter.otlp.proto.grpc.trace_exporter.OTLPSpanExporter', 'OTLPSpanExporter', ([], {}), '()\n', (1728, 1730), False, 'from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n'), ((2628, 2647), 'opentelemetry.context.attach', 'context.attach', (['ctx'], {}), '(ctx)\n', (2642, 2647), False, 'from opentelemetry import trace, context\n'), ((2935, 2954), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2950, 2954), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((2542, 2562), 'opentelemetry.propagate.get_global_textmap', 'get_global_textmap', ([], {}), '()\n', (2560, 2562), False, 'from opentelemetry.propagate import set_global_textmap, get_global_textmap\n')] |
import json
import os
import time
import fitz # PyMuPDF
import llama_index
import openai
import weaviate
from weaviate.gql.get import HybridFusion
from unstructured.cleaners.core import clean
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context
from llama_index.response.pprint_utils import pprint_source_node
from langchain.document_loaders import GutenbergLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Weaviate
# Grimms' Fairy Tales by Jacob Grimm and Wilhelm Grimm
loader = GutenbergLoader("https://www.gutenberg.org/files/2591/2591-0.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(
chunk_size=500, chunk_overlap=0, length_function=len
)
docs = text_splitter.split_documents(documents)
WEAVIATE_URL = "http://weaviate:8080"
client = weaviate.Client(
url=WEAVIATE_URL,
additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
)
client.schema.delete_all()
client.schema.get()
schema = {
"classes": [
{
"class": "Test",
"description": "A written paragraph",
"vectorizer": "text2vec-openai",
"moduleConfig": {"text2vec-openai": {"model": "ada", "type": "text"}},
},
]
}
client.schema.create(schema)
vectorstore = Weaviate(client, "Paragraph", "content")
text_meta_pair = [(doc.page_content, doc.metadata) for doc in docs]
texts, meta = list(zip(*text_meta_pair))
vectorstore.add_texts(texts, meta)
query = "the part where with talking animals"
docs = vectorstore.similarity_search(query)
for doc in docs:
print(doc.page_content)
print("*" * 80)
azure_openai_key = os.getenv("AZURE_OPENAI_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
resource_name = os.getenv("RESOURCE_NAME")
azure_client = openai.lib.azure.AzureOpenAI(
azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
api_key=os.getenv("AZURE_OPENAI_KEY"),
api_version="2023-05-15"
)
headers = {
"X-Azure-Api-Key": azure_openai_key,
}
def query_openai(messages):
return azure_client.chat.completions.create(
model="gpt-35-16k", # model = "deployment_name".
messages=messages
)
def prompt(query):
return f""" You are a university professor.
Answer the following question using only the provided context.
If you can't find the answer, do not pretend you know it, ask for more information"
Answer in the same langauge as the question. If you used your own knowledge apart from the context provided mention that.
Question: {query} """
def chunk_files(subdirectory_path, subdirectory):
data = []
# Process each PDF file in this subdirectory
for filename in os.listdir(subdirectory_path):
if filename.endswith('.pdf'):
file_path = os.path.join(subdirectory_path, filename)
str_five = ""
# Open the PDF
with fitz.open(file_path) as doc:
for page_num in range(len(doc)):
page_text = doc[page_num].get_text()
page_text = clean(page_text, bullets=True, extra_whitespace=True)
slide_id = filename + str(page_num)
if page_num % 5 == 0:
if page_num != 0: # Avoid appending empty content for the first page
data.append({
"content": str_five,
"slide_id": slide_id,
"page_interval": str(str(page_num - 5) + "->" + str(page_num)),
"lecture_id": subdirectory # Save the subdirectory name
})
last_page = doc[page_num - 1].get_text() if page_num > 0 else ""
last_page = clean(last_page, bullets=True, extra_whitespace=True)
str_five = last_page + page_text
else:
str_five += "\n\n" + page_text
# Append the last accumulated text if it's not empty
if str_five:
data.append({
"content": str_five,
"slide_id": subdirectory_path + str(len(doc)),
"page_interval": str(str(len(doc) - 10) + "->" + str(len(doc))),
"lecture_id": subdirectory # Save the subdirectory name
})
return data
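# Each entry returned by chunk_files is a dict of this shape (field values are illustrative):
#   {"content": "<~5 pages of cleaned text>", "slide_id": "lecture01.pdf5",
#    "page_interval": "0->5", "lecture_id": "<subdirectory name>"}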
class AI:
def __init__(self):
api_key_header = {
"X-Azure-Api-Key": azure_openai_key, # Replace with your inference API key
}
self.client = weaviate.Client(
url="http://localhost:8080", # Replace with your endpoint
additional_headers=api_key_header
)
def create_class(self):
t2v = {
"model": "ada",
"modelVersion": "002",
"type": "text",
"baseURL": azure_endpoint,
"resourceName": resource_name,
"deploymentId": "te-ada-002",
}
self.client.schema.delete_class("Lectures")
if not self.client.schema.exists("Lectures"):
class_obj = {
"class": "Lectures",
"vectorizer": "text2vec-openai",
"properties": [
{
"dataType": ["text"],
"name": "content",
"moduleConfig": {
"text2vec-openai": {
"vectorizePropertyName": False
}
},
},
{
"dataType": ["text"],
"name": "slide_id",
"moduleConfig": {
"text2vec-openai": {
"vectorizePropertyName": False
}
},
},
{
"dataType": ["text"],
"name": "page_interval",
"moduleConfig": {
"text2vec-openai": {
"vectorizePropertyName": False
}
},
},
{
"dataType": ["text"],
"name": "lecture_id",
"moduleConfig": {
"text2vec-openai": {
"vectorizePropertyName": False
}
},
}
],
"moduleConfig": {
"text2vec-openai": t2v,
"generative-openai": {
"baseURL": azure_endpoint,
"resourceName": resource_name,
"deploymentId": "gpt-35-16k",
"waitForModel": True,
"useGPU": False,
"useCache": True
}
},
}
self.client.schema.create_class(class_obj)
print("Schema created")
directory_path = "../../lectures"
print("Importing data into the batch")
# Iterate through each subdirectory in the root directory
for subdirectory in os.listdir(directory_path):
subdirectory_path = os.path.join(directory_path, subdirectory)
if os.path.isdir(subdirectory_path):
self.batch_import(subdirectory_path, subdirectory)
print("Import Finished")
def batch_import(self, directory_path, subdirectory):
data = chunk_files(directory_path, subdirectory)
# Configure a batch process
self.client.batch.configure(
# `batch_size` takes an `int` value to enable auto-batching
# dynamically update the `batch_size` based on import speed
dynamic=True,
timeout_retries=0
)
with self.client.batch as batch:
# Batch import all Questions
for i, d in enumerate(data):
embeddings_created = False
properties = {
"content": d["content"],
"slide_id": d["slide_id"],
"page_interval": d["page_interval"],
"lecture_id": d["lecture_id"]
}
# create embeddings (exponential backoff to avoid RateLimitError)
for j in range(5): # max 5 retries
# Only attempt to create embeddings if not already created
if not embeddings_created:
try:
batch.add_data_object(
properties,
"Lectures"
)
embeddings_created = True # Set flag to True on success
break # Break the loop as embedding creation was successful
                        except openai.RateLimitError:
time.sleep(2 ** j) # wait 2^j seconds before retrying
print("Retrying import...")
else:
break # Exit loop if embeddings already created
# Raise an error if embeddings were not created after retries
if not embeddings_created:
raise RuntimeError("Failed to create embeddings.")
def generate_response(self, user_message, lecture_id):
completion = query_openai(messages=[{
"role": "user",
"content": f"""
Please give back lecture content that can answer this inquiry:
Do not add anything else.
"{user_message}".\n
"""}])
generated_lecture = completion.choices[0].message.content
if lecture_id == "CIT5230000":
llm = llama_index.llms.AzureOpenAI(model="gpt-35-turbo-16k", deployment_name="gpt-35-16k",
api_key=azure_openai_key, azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview")
embed_model = llama_index.embeddings.AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="te-ada-002",
api_key=azure_openai_key,
azure_endpoint=azure_endpoint,
api_version="2023-03-15-preview"
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
vector_store = WeaviateVectorStore(
weaviate_client=self.client, index_name="Lectures", text_key="content"
)
retriever = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context).as_retriever(
similarity_top_k=1
)
nodes = retriever.retrieve(generated_lecture)
pprint_source_node(nodes[0])
print(nodes[0].node.metadata)
# add hypothetical document embeddings (hyde)
if lecture_id != "" and lecture_id is not None:
response = (
self.client.query
.get("Lectures", ["content", "slide_id", "page_interval", ])
.with_where({
"path": ["lecture_id"],
"operator": "Equal",
"valueText": lecture_id
})
.with_near_text({"concepts": generated_lecture})
#w.with_additional(f'rerank( query: "{user_message}", property: "content"){{score}}')
.with_generate(grouped_task=prompt(user_message))
.with_limit(1)
.do()
)
generated_response = response["data"]["Get"]["Lectures"][0]["_additional"]["generate"]["groupedResult"]
else:
response = (
self.client.query
.get("Lectures", ["content", "slide_id", "page_interval", "lecture_id"])
# alpha = 0 forces using a pure keyword search method (BM25)
# alpha = 1 forces using a pure vector search method
.with_hybrid(query=user_message,
alpha=1,
fusion_type=HybridFusion.RELATIVE_SCORE
)
# .with_additional(f'rerank( query: "{user_message}", property: "content"){{score}}')
.with_generate(grouped_task=prompt(user_message))
.with_limit(3)
.do()
)
generated_response = response["data"]["Get"]["Lectures"][0]["_additional"]["generate"]["groupedResult"]
slides = response["data"]["Get"]["Lectures"][0]["slide_id"]
page_interval = response["data"]["Get"]["Lectures"][0]["page_interval"]
print(json.dumps(response, indent=2))
return generated_response + f"""\n\nMore relevant information on the slides {slides} "pages":{page_interval} """
| [
"llama_index.embeddings.AzureOpenAIEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.response.pprint_utils.pprint_source_node",
"llama_index.llms.AzureOpenAI",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((707, 773), 'langchain.document_loaders.GutenbergLoader', 'GutenbergLoader', (['"""https://www.gutenberg.org/files/2591/2591-0.txt"""'], {}), "('https://www.gutenberg.org/files/2591/2591-0.txt')\n", (722, 773), False, 'from langchain.document_loaders import GutenbergLoader\n'), ((817, 892), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)', 'length_function': 'len'}), '(chunk_size=500, chunk_overlap=0, length_function=len)\n', (838, 892), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((994, 1102), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'WEAVIATE_URL', 'additional_headers': "{'X-OpenAI-Api-Key': os.environ['OPENAI_API_KEY']}"}), "(url=WEAVIATE_URL, additional_headers={'X-OpenAI-Api-Key':\n os.environ['OPENAI_API_KEY']})\n", (1009, 1102), False, 'import weaviate\n'), ((1464, 1504), 'langchain.vectorstores.Weaviate', 'Weaviate', (['client', '"""Paragraph"""', '"""content"""'], {}), "(client, 'Paragraph', 'content')\n", (1472, 1504), False, 'from langchain.vectorstores import Weaviate\n'), ((1827, 1856), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (1836, 1856), False, 'import os\n'), ((1874, 1908), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (1883, 1908), False, 'import os\n'), ((1925, 1951), 'os.getenv', 'os.getenv', (['"""RESOURCE_NAME"""'], {}), "('RESOURCE_NAME')\n", (1934, 1951), False, 'import os\n'), ((2862, 2891), 'os.listdir', 'os.listdir', (['subdirectory_path'], {}), '(subdirectory_path)\n', (2872, 2891), False, 'import os\n'), ((2016, 2050), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (2025, 2050), False, 'import os\n'), ((2064, 2093), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (2073, 2093), False, 'import os\n'), ((4802, 4881), 'weaviate.Client', 'weaviate.Client', ([], {'url': '"""http://localhost:8080"""', 'additional_headers': 'api_key_header'}), "(url='http://localhost:8080', additional_headers=api_key_header)\n", (4817, 4881), False, 'import weaviate\n'), ((2955, 2996), 'os.path.join', 'os.path.join', (['subdirectory_path', 'filename'], {}), '(subdirectory_path, filename)\n', (2967, 2996), False, 'import os\n'), ((7628, 7654), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (7638, 7654), False, 'import os\n'), ((10504, 10688), 'llama_index.llms.AzureOpenAI', 'llama_index.llms.AzureOpenAI', ([], {'model': '"""gpt-35-turbo-16k"""', 'deployment_name': '"""gpt-35-16k"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='gpt-35-turbo-16k', deployment_name=\n 'gpt-35-16k', api_key=azure_openai_key, azure_endpoint=azure_endpoint,\n api_version='2023-03-15-preview')\n", (10532, 10688), False, 'import llama_index\n'), ((10800, 11005), 'llama_index.embeddings.AzureOpenAIEmbedding', 'llama_index.embeddings.AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""te-ada-002"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='text-embedding-ada-002',\n deployment_name='te-ada-002', api_key=azure_openai_key, azure_endpoint=\n azure_endpoint, api_version='2023-03-15-preview')\n", (10843, 11005), False, 'import llama_index\n'), ((11121, 11183), 
'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (11149, 11183), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((11213, 11308), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'self.client', 'index_name': '"""Lectures"""', 'text_key': '"""content"""'}), "(weaviate_client=self.client, index_name='Lectures',\n text_key='content')\n", (11232, 11308), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((11574, 11602), 'llama_index.response.pprint_utils.pprint_source_node', 'pprint_source_node', (['nodes[0]'], {}), '(nodes[0])\n', (11592, 11602), False, 'from llama_index.response.pprint_utils import pprint_source_node\n'), ((13511, 13541), 'json.dumps', 'json.dumps', (['response'], {'indent': '(2)'}), '(response, indent=2)\n', (13521, 13541), False, 'import json\n'), ((3067, 3087), 'fitz.open', 'fitz.open', (['file_path'], {}), '(file_path)\n', (3076, 3087), False, 'import fitz\n'), ((7692, 7734), 'os.path.join', 'os.path.join', (['directory_path', 'subdirectory'], {}), '(directory_path, subdirectory)\n', (7704, 7734), False, 'import os\n'), ((7754, 7786), 'os.path.isdir', 'os.path.isdir', (['subdirectory_path'], {}), '(subdirectory_path)\n', (7767, 7786), False, 'import os\n'), ((3234, 3287), 'unstructured.cleaners.core.clean', 'clean', (['page_text'], {'bullets': '(True)', 'extra_whitespace': '(True)'}), '(page_text, bullets=True, extra_whitespace=True)\n', (3239, 3287), False, 'from unstructured.cleaners.core import clean\n'), ((11359, 11445), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (11393, 11445), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((3970, 4023), 'unstructured.cleaners.core.clean', 'clean', (['last_page'], {'bullets': '(True)', 'extra_whitespace': '(True)'}), '(last_page, bullets=True, extra_whitespace=True)\n', (3975, 4023), False, 'from unstructured.cleaners.core import clean\n'), ((9510, 9528), 'time.sleep', 'time.sleep', (['(2 ** j)'], {}), '(2 ** j)\n', (9520, 9528), False, 'import time\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
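# Example call (the URL is a placeholder): _get_elasticsearch_client(es_url="http://localhost:9200")
# returns an AsyncElasticsearch client after a synchronous connectivity check against the cluster.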
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.filters) == 1:
filter = standard_filters.filters[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.filters:
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
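# Example: _to_llama_similarities([1.0, 0.0]) -> [1.0, ~0.37]; scores are shifted by the
# maximum and exponentiated, so the best score always maps to 1.0.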
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the query, including its embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the query, including its embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.filters) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
            top_k_scores = [(total_rank - rank) / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3728, 3744), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3736, 3744), True, 'import numpy as np\n'), ((5409, 5429), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5427, 5429), False, 'import nest_asyncio\n'), ((16807, 16847), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16811, 16847), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12293, 12368), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12303, 12368), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9758, 9782), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9780, 9782), False, 'import asyncio\n'), ((11685, 11730), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11706, 11730), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12440, 12504), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12450, 12504), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13378, 13402), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13400, 13402), False, 'import asyncio\n'), ((15585, 15609), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15607, 15609), False, 'import asyncio\n'), ((18698, 18729), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18719, 18729), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3780, 3802), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3786, 3802), True, 'import numpy as np\n'), ((11919, 11931), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11929, 11931), False, 'import uuid\n'), ((19372, 19515), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19380, 19515), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
import json
import logging
import os
import re
import time
import xml.etree.ElementTree as ET
from collections import defaultdict, deque
from functools import partial
import backoff
import llama_index
import markdown
import openai
import tiktoken
from colorama import Fore
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import (
Document,
GPTListIndex,
GPTVectorStoreIndex,
LLMPredictor,
ResponseSynthesizer,
ServiceContext,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
from llama_index.indices.composability import ComposableGraph
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import VectorIndexRetriever
from api.mygene_api import mygene_api
from api.myvariant_api import myvariant_api
from api.pubmed_api import pubmed_api
from config import OPENAI_API_KEY
logging.getLogger("llama_index").setLevel(logging.WARNING)
# file_handler = logging.FileHandler('utils.log')
# Configure the logging settings
# logging.basicConfig(level=logging.INFO, handlers=[file_handler])
MAX_TOKENS = 4097
api_info_mapping = {
"mygene": mygene_api,
"PubMed": pubmed_api,
"myvariant": myvariant_api,
}
api_key = OPENAI_API_KEY or os.environ["OPENAI_API_KEY"]
openai.api_key = api_key
def get_input(prompt, type_=None, min_=None, max_=None, range_=None):
if min_ is not None and max_ is not None and max_ < min_:
raise ValueError("min_ must be less than or equal to max_.")
while True:
ui = input(prompt)
if type_ is not None:
try:
ui = type_(ui)
except ValueError:
print(f"Input type must be {type_.__name__}!")
continue
if max_ is not None and ui > max_:
print(f"Input must be less than or equal to {max_}.")
elif min_ is not None and ui < min_:
print(f"Input must be greater than or equal to {min_}.")
elif range_ is not None and ui not in range_:
if isinstance(range_, range):
template = "Input must be between {} and {}."
print(template.format(range_.start, range_.stop))
else:
template = "Input must be {}."
print(template.format(", ".join(map(str, range_))))
else:
return ui
def select_task(task_list):
# Task list is actually a Queue
task_list = list(task_list)
print("\n\n")
choice = get_input(
Fore.LIGHTGREEN_EX
+ "\033[1mWhich task would you like to execute? Type 0 to create your own task! \033[0m",
type_=int,
min_=0,
max_=len(task_list),
)
if choice == 0:
task = input(Fore.LIGHTGREEN_EX + "\033[1mWrite your task! \033[0m")
else:
task = task_list.pop(choice - 1)
return task, deque(task_list)
def num_tokens_from_string(string: str, encoding_name: str = "gpt2") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
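# get_max_completion_len (below) relies on this helper: MAX_TOKENS - num_tokens_from_string(prompt)
# tokens remain available for the model's completion.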
def get_key_results(index, objective, top_k=20, additional_queries=[]):
"""Run final queries over retrieved documents and store in doc_store."""
if not index.docstore.docs:
print(
Fore.RED
+ "\033[1m\n! WARNING: NO TASKS RETURNED RESULTS. PLEASE TWEAK YOUR OBJECTIVE AND CHECK SPELLING. !\n\033[0m"
)
return []
print(Fore.CYAN + "\033[1m\n*****COMPILING KEY RESULTS*****\n\033[0m")
key_results = []
queries = [
"Give a brief high level summary of all the data.",
"Briefly list all the main points that the data covers.",
"Generate several creative hypotheses given the data.",
"What are some high level research directions to explore further given the data?",
f"Do your best to answer the objective: {objective} given the information.",
]
for query in queries:
print(Fore.CYAN + f"\nCOMPILING RESULT {query}\n")
res = None
try:
res, citation_data = query_knowledge_base(
index=index, query=query, list_index=False, top_k=top_k
)
except Exception as e:
print(f"Exception getting key result {query}, error {e}")
if res:
query = f"## {query}\n\n"
res_html = markdown.markdown(res)
res_citation = markdown.markdown(citation_data)
key_results.append(
(query, f"{res_html}\n\n### Citations\n\n{res_citation}\n\n")
)
print(Fore.CYAN + f"\nRESULTS COMPILED. SAVED TO DIRECTORY `out`\n")
return key_results
def get_max_completion_len(prompt):
tokens = num_tokens_from_string(prompt)
return MAX_TOKENS - tokens
def execute_python(code: str):
# ret is defined in the code string
loc = {}
try:
exec(code, globals(), loc)
except Exception as e:
print(f"Exception executing code {code}, {e}")
return
return loc["ret"]
def process_myvariant_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
for result in results:
variant_name = result.get("_id")
gene_affected = result.get("cadd", {}).get("gene", {}).get("genename")
consequence = result.get("cadd", {}).get("consequence")
cadd_score = result.get("cadd", {}).get("phred")
rsid = result.get("dbsnp", {}).get("rsid")
variant_data = ""
citation_data = ""
if variant_name:
variant_data += f"Variant Name: {variant_name}\n"
if gene_affected:
variant_data += f"Gene Affected: {gene_affected}\n"
if consequence:
variant_data += f"Consequence: {consequence}\n"
if cadd_score is not None:
variant_data += f"CADD Score: {cadd_score}\n"
if rsid:
variant_data += f"rsID: {rsid}\n"
processed_result.append((variant_data, {"citation_data": citation_data}))
return processed_result
def process_mygene_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
# Each result will be split into 2 documents: summary and pathway
for json_data in results:
name = json_data.get("name")
refseq_genomic = json_data.get("refseq", {}).get("genomic", [])
refseq_rna = json_data.get("refseq", {}).get("rna", [])
symbol = json_data.get("symbol")
taxid = json_data.get("taxid")
type_of_gene = json_data.get("type_of_gene")
pos = json_data.get("genomic_pos_hg19")
summary = json_data.get("summary")
generif = json_data.get("generif")
output_summary = ""
citation_data = ""
# Summary
if name:
output_summary += f"Gene Name: {name}\n"
if refseq_genomic:
output_summary += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_summary += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if symbol:
output_summary += f"Symbol: {symbol}\n"
if taxid:
output_summary += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != "unknown":
output_summary += f"Type of gene: {type_of_gene}\n"
if pos:
output_summary += f"Position: {pos}\n"
if summary:
output_summary += f"Summary of {name}: {summary}\n"
else:
# If not summary, use generifs.
if generif:
# Take 20 rifs max. Some genes have hundreds of rifs and the results size explodes.
for rif in generif[:20]:
pubmed = rif.get("pubmed")
text = rif.get("text")
if text:
output_summary += text
if pubmed:
citation_data += f" Pubmed ID: {pubmed}"
output_summary = output_summary.strip()
# logging.info(f"Mygene Summary result {name}, length is {str(len(output_summary))}")
if output_summary:
processed_result.append((output_summary, {"citation_data": citation_data}))
# Pathway
pathway = json_data.get("pathway")
if pathway:
kegg = pathway.get("kegg", [])
pid = pathway.get("pid", [])
reactome = pathway.get("reactome", [])
wikipathways = pathway.get("wikipathways", [])
netpath = pathway.get("netpath", [])
biocarta = pathway.get("biocarta", [])
pathway_elements = {
"kegg": kegg,
"pid": pid,
"reactome": reactome,
"wikipathways": wikipathways,
"netpath": netpath,
"biocarta": biocarta,
}
# mygene returns dicts instead of lists if singleton
# Wrap with list if not list
for k, v in pathway_elements.items():
if type(v) is not list:
pathway_elements[k] = [v]
output_pathway = ""
citation_data = ""
if name:
output_pathway += f"Gene Name: {name}\n"
if symbol:
output_pathway += f"Symbol: {symbol}\n"
if taxid:
output_pathway += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != "unknown":
output_pathway += f"Type of gene: {type_of_gene}\n"
if refseq_genomic:
output_pathway += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_pathway += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if pos:
output_pathway += f"Position: {pos}\n"
output_pathway += f"PATHWAYS\n\n"
for k, v in pathway_elements.items():
output_pathway += f"\n{k}:\n"
for item in v:
output_pathway += f" ID: {item.get('id', '')}"
output_pathway += f" Name: {item.get('name', '')}"
# logging.info(f"Mygene Pathway result {name}, length is {len(output_pathway)}")
output_pathway = output_pathway.strip()
if output_pathway:
processed_result.append(
(output_pathway, {"citation_data": citation_data})
)
return processed_result
def process_pubmed_result(result):
try:
root = ET.fromstring(result)
except Exception as e:
print(f"Cannot parse pubmed result, expected xml. {e}")
print("Adding whole document. Note this will lead to suboptimal results.")
return result if isinstance(result, list) else [result]
processed_result = []
for article in root:
res_ = ""
citation_data = ""
for title in article.iter("Title"):
res_ += f"{title.text}\n"
citation_data += f"{title.text}\n"
for abstract in article.iter("AbstractText"):
res_ += f"{abstract.text}\n"
for author in article.iter("Author"):
try:
citation_data += f"{author.find('LastName').text}"
citation_data += f", {author.find('ForeName').text}\n"
            except Exception:
pass
for journal in article.iter("Journal"):
res_ += f"{journal.find('Title').text}\n"
citation_data += f"{journal.find('Title').text}\n"
for volume in article.iter("Volume"):
citation_data += f"{volume.text}\n"
for issue in article.iter("Issue"):
citation_data += f"{issue.text}\n"
for pubdate in article.iter("PubDate"):
try:
year = pubdate.find("Year").text
citation_data += f"{year}"
month = pubdate.find("Month").text
citation_data += f"-{month}"
day = pubdate.find("Day").text
citation_data += f"-{day}\n"
            except Exception:
pass
for doi in article.iter("ELocationID"):
if doi.get("EIdType") == "doi":
res_ += f"{doi.text}\n"
if res_:
processed_result.append((res_, {"citation_data": citation_data}))
return processed_result
def get_code_params(code: str, preparam_text: str, postparam_text: str):
l = len(preparam_text)
preparam_index = code.find(preparam_text)
postparam_index = code.find(postparam_text)
if preparam_index == -1 or postparam_index == -1:
return
params = code[preparam_index + l : postparam_index].strip()
if params == "":
return
return params
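# Example (marker strings are illustrative):
#   get_code_params("PRE query_term = 'BRCA1' POST", "PRE", "POST") -> "query_term = 'BRCA1'"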
def validate_llm_response(goal, response):
validation_prompt = f"I gave an LLM this goal: '{goal}' and it gave this response: '{response}'. Is this reasonable, or did something go wrong? [yes|no]"
validation_response = (
openai.Completion.create(
engine="text-davinci-003", prompt=validation_prompt, temperature=0.0
)
.choices[0]
.text.strip()
)
if validation_response.lower() == "yes":
return True
else:
return False
def generate_tool_prompt(task):
if "MYVARIANT" in task:
api_name = "myvariant"
elif "MYGENE" in task:
api_name = "mygene"
elif "PUBMED" in task:
api_name = "PubMed"
else:
print(f"Error. Tool not found in task: {task}")
return None
api_info = api_info_mapping[api_name]
prompt = f"""You have access to query the {api_name} API. If a task starts with '{api_name.upper()}:' then you should create the code to query the {api_name} API based off the documentation and return the code to complete your task. If you use the {api_name} API, do not answer with words, simply write the parameters used to call the function then cease output. Be sure it is valid python that will execute in a python interpreter.
---
Here is the {api_name} documentation
{api_info}
---
You should change the parameters to fit your specific task.
""".strip()
return prompt
def get_ada_embedding(text):
ada_embedding_max_size = 8191
text = text.replace("\n", " ")
if num_tokens_from_string(text) > ada_embedding_max_size:
# There must be a better way to do this.
text = text[:ada_embedding_max_size]
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
"data"
][0]["embedding"]
def insert_doc_llama_index(index, doc_id, data, metadata={}, embedding=None):
if not embedding:
embedding = get_ada_embedding(data)
doc = Document(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)
doc.excluded_llm_metadata_keys = ["citation_data"]
doc.excluded_embed_metadata_keys = ["citation_data"]
index.insert(doc)
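# Wrap the LLM-generated parameters with the matching API wrapper, execute the call,
# cache the parameters used, and post-process the results for the tool named in the task.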
def handle_python_result(result, cache, task, doc_store, doc_store_task_key):
results_returned = True
params = result
doc_store["tasks"][doc_store_task_key]["result_code"] = result
tool = task.split(":")[0]
if tool == "MYGENE":
result = (
"from api.mygene_wrapper import mygene_wrapper\n"
+ result
+ "\nret = mygene_wrapper(query_term, size, from_)"
)
elif tool == "MYVARIANT":
result = (
"from api.myvariant_wrapper import myvariant_wrapper\n"
+ result
+ "\nret = myvariant_wrapper(query_term)"
)
elif tool == "PUBMED":
result = (
"from api.pubmed_wrapper import pubmed_wrapper\n"
+ result
+ "\nret = pubmed_wrapper(query_term, retmax, retstart)"
)
executed_result = execute_python(result)
if type(executed_result) is list:
executed_result = list(filter(lambda x: x, executed_result))
if (executed_result is not None) and (
not executed_result
    ):  # Execution completed successfully, but the executed result was an empty list
results_returned = False
result = "NOTE: Code returned no results\n\n" + result
print(Fore.BLUE + f"\nTask '{task}' completed but returned no results")
if "MYVARIANT" in task:
if results_returned:
cache["MYVARIANT"].append(f"---\n{params}---\n")
else:
cache["MYVARIANT"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_myvariant_result(executed_result)
if "MYGENE" in task:
if results_returned:
cache["MYGENE"].append(f"---\n{params}---\n")
else:
cache["MYGENE"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_mygene_result(executed_result)
if "PUBMED" in task:
if results_returned:
cache["PUBMED"].append(f"---\n{params}---\n")
else:
cache["PUBMED"].append(
f"---\nNote: This call returned no results\n{params}---\n"
)
processed_result = process_pubmed_result(executed_result)
if executed_result is None:
result = "NOTE: Code did not run succesfully\n\n" + result
print(
Fore.BLUE + f"Task '{task}' failed. Code {result} did not run succesfully."
)
if "MYGENE" in task:
cache["MYGENE"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
if "PUBMED" in task:
cache["PUBMED"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
if "MYVARIANT" in task:
cache["MYVARIANT"].append(
f"---\nNote: This call did not run succesfully\n{params}---\n"
)
return
return processed_result
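# Embed each processed result, insert it into the llama_index vector index,
# and record the output and metadata in the doc store.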
def handle_results(
result, index, doc_store, doc_store_key, task_id_counter, RESULT_CUTOFF
):
for i, r in enumerate(result):
res, metadata = r[0], r[1]
res = str(res)[
:RESULT_CUTOFF
] # Occasionally an enormous result will slow the program to a halt. Not ideal to lose results but putting in place for now.
vectorized_data = get_ada_embedding(res)
task_id = f"doc_id_{task_id_counter}_{i}"
insert_doc_llama_index(
index=index,
doc_id=task_id,
data=res,
metadata=metadata,
embedding=vectorized_data,
)
doc_store["tasks"][doc_store_key]["results"].append(
{
"task_id_counter": task_id_counter,
"vectorized_data": vectorized_data,
"output": res,
"metadata": metadata,
}
)
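# Retrieve the top_k most similar chunks and synthesize a single response,
# returning the answer text plus any citation data found in the metadata.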
def query_knowledge_base(
index,
query="Give a detailed but terse overview of all the information. Start with a high level summary and then go into details. Do not include any further instruction. Do not include filler words.",
response_mode="tree_summarize",
top_k=50,
list_index=False,
):
if not index.docstore.docs:
print(Fore.RED + "NO INFORMATION IN LLAMA INDEX")
return
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = ResponseSynthesizer.from_args(
response_mode="tree_summarize",
)
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
if list_index:
query_response = index.query(query, response_mode="default")
else:
# From llama index docs: Empirically, setting response_mode="tree_summarize" also leads to better summarization results.
query_response = query_engine.query(query)
extra_info = ""
if query_response.metadata:
try:
extra_info = [
x.get("citation_data") for x in query_response.metadata.values()
]
if not any(extra_info):
extra_info = []
except Exception as e:
print("Issue getting extra info from llama index")
return query_response.response, "\n\n".join(extra_info)
def create_index(
api_key,
summaries=[],
temperature=0.0,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
documents = []
for i, summary in enumerate(summaries):
doc = Document(text=summary, doc_id=str(i))
doc.excluded_llm_metadata_keys = ["citation_data"]
doc.excluded_embed_metadata_keys = ["citation_data"]
documents.append(doc)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size=4000
)
return GPTVectorStoreIndex(documents, service_context=service_context)
def create_graph_index(
api_key,
indicies=[],
summaries=[],
temperature=0.0,
model_name="text-davinci-003",
max_tokens=2000,
):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
graph = ComposableGraph.from_indices(
GPTListIndex,
indicies,
index_summaries=summaries,
service_context=service_context,
)
return graph
def create_list_index(
api_key,
summaries=[],
temperature=0.0,
model_name="text-davinci-003",
max_tokens=2000,
):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
documents = []
for i, summary in enumerate(summaries):
documents.append(Document(text=summary, doc_id=str(i)))
index = GPTListIndex.from_documents(documents, service_context=service_context)
return index
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(
openai.error.RateLimitError,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.ServiceUnavailableError,
openai.error.Timeout,
),
)
def get_gpt_completion(
prompt,
temp=0.0,
engine="text-davinci-003",
top_p=1,
frequency_penalty=0,
presence_penalty=0,
):
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=get_max_completion_len(prompt),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
)
return response.choices[0].text.strip()
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(
openai.error.RateLimitError,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.ServiceUnavailableError,
openai.error.Timeout,
),
)
def get_gpt_chat_completion(
system_prompt, user_prompt, model="gpt-3.5-turbo-16k", temp=0.0
):
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
temperature=temp,
)
return response.choices[0]["message"]["content"].strip()
### FILE UTILS ###
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def write_file(path, contents, mode="w"):
with open(path, mode) as f:
f.write(contents)
def read_file(path, mode="r"):
with open(path, mode) as f:
contents = f.read()
if not contents:
print(f"WARNING: file {path} empty")
return contents
def sanitize_dir_name(dir_name):
# Remove invalid characters
dir_name = re.sub(r'[<>:"/\|?*]', "_", dir_name)
dir_name = dir_name.replace(" ", "_")
# Remove leading period
if dir_name.startswith("."):
dir_name = dir_name[1:]
return dir_name
def save(
index,
doc_store,
OBJECTIVE,
current_datetime,
task_id_counter,
task_list,
completed_tasks,
cache,
reload_count,
summaries,
):
# Make basepath.
path = os.path.join("./out", sanitize_dir_name(OBJECTIVE) + "_" + current_datetime)
make_dir(path)
# Save llama index.
index.storage_context.persist(persist_dir=os.path.join(path, "index.json"))
# Save program state.
state = {
"summaries": summaries,
"reload_count": reload_count,
"task_id_counter": task_id_counter,
"task_list": list(task_list),
"completed_tasks": completed_tasks,
"cache": dict(cache),
"current_datetime": current_datetime,
"objective": OBJECTIVE,
}
with open(os.path.join(path, "state.json"), "w") as outfile:
json.dump(state, outfile)
# Save results.
if "key_results" in doc_store:
if reload_count:
new_time = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
header = f"# {OBJECTIVE}\nDate: {new_time}\n\n"
else:
header = f"# {OBJECTIVE}\nDate: {current_datetime}\n\n"
key_findings_path = os.path.join(path, f"key_findings_{reload_count}.md")
write_file(key_findings_path, header, mode="a+")
for res in doc_store["key_results"]:
content = f"{res[0]}{res[1]}"
write_file(key_findings_path, content, mode="a+")
for task, doc in doc_store["tasks"].items():
doc_path = os.path.join(path, task)
make_dir(doc_path)
result_path = os.path.join(doc_path, "results")
make_dir(result_path)
if "executive_summary" in doc:
write_file(
os.path.join(result_path, "executive_summary.txt"),
doc["executive_summary"],
)
if "result_code" in doc:
write_file(os.path.join(result_path, "api_call.txt"), doc["result_code"])
for i, result in enumerate(doc["results"]):
result_path_i = os.path.join(result_path, str(i))
make_dir(result_path_i)
write_file(os.path.join(result_path_i, "output.txt"), result["output"])
write_file(
os.path.join(result_path_i, "vector.txt"),
str(result["vectorized_data"]),
)
def load(path):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=0,
openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
)
)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, chunk_size=4000
)
# rebuild storage context
storage_context = StorageContext.from_defaults(
persist_dir=os.path.join(path, "index.json")
)
index = load_index_from_storage(
storage_context=storage_context, service_context=service_context
)
state_path = os.path.join(path, "state.json")
if os.path.exists(state_path):
with open(state_path, "r") as f:
json_data = json.load(f)
try:
reload_count = json_data["reload_count"] + 1
task_id_counter = json_data["task_id_counter"]
task_list = json_data["task_list"]
completed_tasks = json_data["completed_tasks"]
cache = defaultdict(list, json_data["cache"])
current_datetime = json_data["current_datetime"]
objective = json_data["objective"]
summaries = json_data["summaries"]
except KeyError as e:
raise Exception(
f"Missing key '{e.args[0]}' in JSON file at path '{state_path}'"
)
return (
index,
task_id_counter,
deque(task_list),
completed_tasks,
cache,
current_datetime,
objective,
reload_count,
summaries,
)
| [
"llama_index.GPTListIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.indices.composability.ComposableGraph.from_indices",
"llama_index.GPTVectorStoreIndex",
"llama_index.ResponseSynthesizer.from_args",
"llama_index.load_index_from_storage",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.Document"
] | [((3048, 3084), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (3069, 3084), False, 'import tiktoken\n'), ((14814, 14888), 'llama_index.Document', 'Document', ([], {'text': 'data', 'embedding': 'embedding', 'doc_id': 'doc_id', 'metadata': 'metadata'}), '(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)\n', (14822, 14888), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((19422, 19479), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (19442, 19479), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((19568, 19629), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {'response_mode': '"""tree_summarize"""'}), "(response_mode='tree_summarize')\n", (19597, 19629), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((19693, 19782), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (19713, 19782), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((21128, 21202), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (21156, 21202), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21228, 21291), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (21247, 21291), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21681, 21738), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (21709, 21738), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((21752, 21869), 'llama_index.indices.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'indicies'], {'index_summaries': 'summaries', 'service_context': 'service_context'}), '(GPTListIndex, indicies, index_summaries=\n summaries, service_context=service_context)\n', (21780, 21869), False, 'from llama_index.indices.composability import ComposableGraph\n'), ((22293, 22350), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (22321, 22350), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((22491, 
22562), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (22518, 22562), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((22609, 22644), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (22616, 22644), False, 'from functools import partial\n'), ((23696, 23862), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': 'model', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n user_prompt}]", 'temperature': 'temp'}), "(model=model, messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': user_prompt}],\n temperature=temp)\n", (23724, 23862), False, 'import openai\n'), ((23344, 23379), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (23351, 23379), False, 'from functools import partial\n'), ((24449, 24486), 're.sub', 're.sub', (['"""[<>:"/\\\\|?*]"""', '"""_"""', 'dir_name'], {}), '(\'[<>:"/\\\\|?*]\', \'_\', dir_name)\n', (24455, 24486), False, 'import re\n'), ((27227, 27301), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (27255, 27301), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((27471, 27565), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (27494, 27565), False, 'from llama_index import Document, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, ResponseSynthesizer, ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((27592, 27624), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (27604, 27624), False, 'import os\n'), ((27632, 27658), 'os.path.exists', 'os.path.exists', (['state_path'], {}), '(state_path)\n', (27646, 27658), False, 'import os\n'), ((899, 931), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (916, 931), False, 'import logging\n'), ((2879, 2895), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (2884, 2895), False, 'from collections import defaultdict, deque\n'), ((10649, 10670), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['result'], {}), '(result)\n', (10662, 10670), True, 'import xml.etree.ElementTree as ET\n'), ((24036, 24056), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (24050, 24056), False, 'import os\n'), ((24066, 24083), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (24077, 24083), False, 'import os\n'), ((25481, 25506), 'json.dump', 'json.dump', (['state', 'outfile'], {}), '(state, outfile)\n', (25490, 25506), False, 'import json\n'), ((25821, 25874), 'os.path.join', 'os.path.join', (['path', 'f"""key_findings_{reload_count}.md"""'], {}), "(path, f'key_findings_{reload_count}.md')\n", (25833, 25874), False, 'import os\n'), ((26151, 26175), 
'os.path.join', 'os.path.join', (['path', 'task'], {}), '(path, task)\n', (26163, 26175), False, 'import os\n'), ((26225, 26258), 'os.path.join', 'os.path.join', (['doc_path', '"""results"""'], {}), "(doc_path, 'results')\n", (26237, 26258), False, 'import os\n'), ((28455, 28471), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (28460, 28471), False, 'from collections import defaultdict, deque\n'), ((4448, 4470), 'markdown.markdown', 'markdown.markdown', (['res'], {}), '(res)\n', (4465, 4470), False, 'import markdown\n'), ((4498, 4530), 'markdown.markdown', 'markdown.markdown', (['citation_data'], {}), '(citation_data)\n', (4515, 4530), False, 'import markdown\n'), ((20669, 20779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (20679, 20779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((21492, 21598), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (21498, 21598), False, 'from langchain import OpenAI\n'), ((22104, 22210), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (22110, 22210), False, 'from langchain import OpenAI\n'), ((25023, 25055), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (25035, 25055), False, 'import os\n'), ((25422, 25454), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (25434, 25454), False, 'import os\n'), ((27041, 27144), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'api_key', 'model_name': '"""gpt-3.5-turbo-16k"""', 'max_tokens': '(6000)'}), "(temperature=0, openai_api_key=api_key, model_name=\n 'gpt-3.5-turbo-16k', max_tokens=6000)\n", (27051, 27144), False, 'from langchain.chat_models import ChatOpenAI\n'), ((27419, 27451), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (27431, 27451), False, 'import os\n'), ((27725, 27737), 'json.load', 'json.load', (['f'], {}), '(f)\n', (27734, 27737), False, 'import json\n'), ((14550, 14619), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': '[text]', 'model': '"""text-embedding-ada-002"""'}), "(input=[text], model='text-embedding-ada-002')\n", (14573, 14619), False, 'import openai\n'), ((25615, 25649), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (25628, 25649), False, 'import time\n'), ((26369, 26419), 'os.path.join', 'os.path.join', (['result_path', '"""executive_summary.txt"""'], {}), "(result_path, 'executive_summary.txt')\n", (26381, 26419), False, 'import os\n'), ((26533, 26574), 'os.path.join', 'os.path.join', (['result_path', '"""api_call.txt"""'], {}), "(result_path, 'api_call.txt')\n", (26545, 26574), False, 'import os\n'), ((26771, 26812), 'os.path.join', 'os.path.join', (['result_path_i', '"""output.txt"""'], {}), "(result_path_i, 'output.txt')\n", (26783, 26812), False, 'import os\n'), 
((26872, 26913), 'os.path.join', 'os.path.join', (['result_path_i', '"""vector.txt"""'], {}), "(result_path_i, 'vector.txt')\n", (26884, 26913), False, 'import os\n'), ((28018, 28055), 'collections.defaultdict', 'defaultdict', (['list', "json_data['cache']"], {}), "(list, json_data['cache'])\n", (28029, 28055), False, 'from collections import defaultdict, deque\n'), ((13093, 13192), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'validation_prompt', 'temperature': '(0.0)'}), "(engine='text-davinci-003', prompt=\n validation_prompt, temperature=0.0)\n", (13117, 13192), False, 'import openai\n')] |
import streamlit as st
import requests
import base64
import os
import llama_index
from audio_recorder_streamlit import audio_recorder
from openai import OpenAI
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from scipy.io.wavfile import write
os.environ['OPENAI_API_KEY'] = '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'
API_KEY = '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'
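# Build a vector index over the documents in db3 and answer the query with its query engine.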
def RAG(text):
documents = SimpleDirectoryReader("db3").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(text)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def linkRAGhindi(text):
new_prompt="निम्नलिखित प्रश्न के लिए सबसे उपयुक्त वेबसाइट लिंक दें"+text
documents = SimpleDirectoryReader("db1").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def rechindi(text):
new_prompt="निम्नलिखित प्रश्न के लिए सबसे उपयुक्त वेबसाइट लिंक दें"+text
documents = SimpleDirectoryReader("db2").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def linkRAGenglish(text):
new_prompt="Give the most appropiate website link for the following question "+text
documents = SimpleDirectoryReader("db1").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def recenglish(text):
new_prompt="Give the most intresting other website link for the following question "+text
documents = SimpleDirectoryReader("db2").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query(new_prompt)
# Extract the text from the response
response_text = response.response if hasattr(response, 'response') else str(response)
return response_text
def transcribe_text_to_voice_english(audio_location):
client = OpenAI(api_key=API_KEY)
audio_file = open(audio_location, "rb")
transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
return transcript.text
def transcribe_text_to_voice_hindi(audio_location):
url = "https://api.runpod.ai/v2/faster-whisper/runsync"
with open(audio_location, "rb") as audio_file:
audio_base64 = base64.b64encode(audio_file.read()).decode('utf-8')
payload = {
"input": {
"audio_base64": audio_base64,
"model": "small",
"transcription": "plain_text",
"translate": True,
"language": "hi",
"temperature": 0,
"best_of": 5,
"beam_size": 5,
"patience": 1,
"suppress_tokens": "-1",
"condition_on_previous_text": False,
"temperature_increment_on_fallback": 0.2,
"compression_ratio_threshold": 2.4,
"logprob_threshold": -1,
"no_speech_threshold": 0.6,
"word_timestamps": False
},
"enable_vad": False
}
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": "X01PG949AHTOVRYHLQZKSRIWN82UHBUU5JYLNAHM"
}
response = requests.post(url, json=payload, headers=headers)
response_json = response.json()
transcription = response_json["output"]["transcription"]
translation = response_json["output"]["translation"].strip().split('\n')[-1].strip()
return transcription, translation
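# Send the text to the chat model as a single user message and return the reply.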
def recommendation(text):
client = OpenAI(api_key=API_KEY)
messages = [{"role": "user", "content": text}]
response = client.chat.completions.create(model="gpt-3.5-turbo-1106", messages=messages)
return response.choices[0].message.content
def text_to_speech_ai(speech_file_path, api_response):
client = OpenAI(api_key=API_KEY)
response = client.audio.speech.create(model="tts-1",voice="nova",input=api_response)
response.stream_to_file(speech_file_path)
st.title("🚀 SHRESHTH 💬 Bhuvan Assistant")
# Radio wheel for language selection
language = st.radio("Language/भाषा",["English", "हिंदी"])
# Displaying description based on selected language
if language == "English":
mode = st.radio("Select Mode Of Input", ["Voice","Text"])
st.write("Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance")
if mode == "Voice" or mode == "आवाज":
st.write("Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!")
audio_bytes = audio_recorder(
text="",
recording_color="#e8b62c",
neutral_color="#6aa36f",
icon_name="microphone",
icon_size="2x",
)
if audio_bytes:
# Save the Recorded File
audio_location = "audio_file.wav"
with open(audio_location, "wb") as f:
f.write(audio_bytes)
if language == "English":
text=transcribe_text_to_voice_english(audio_location)
st.write(text)
else:
text,trans=transcribe_text_to_voice_hindi(audio_location)
st.write(text)
link_response = linkRAGenglish(text)
st.write("SHRESHTH:", link_response)
api_response = RAG(text)
st.write("SHRESHTH:", api_response)
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=linkRAGenglish(recctext)
st.write("SHRESHTH:", recc)
else:
# Text input option
text_input = st.text_area("Enter your text here and press Enter", "")
if st.button("Submit"):
# Process the entered text
link_response = linkRAGenglish(text_input)
st.write("SHRESHTH:", link_response)
api_response = RAG(text_input)
st.write("SHRESHTH:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=linkRAGenglish(recctext)
st.write("SHRESHTH:", recc)
else:
mode = st.radio("इनपुट मोड का चयन करें", ["आवाज", "टेक्स्ट"])
st.write("स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता")
if mode == "Voice" or mode == "आवाज" or mode == "ভয়েস":
st.write("आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!")
audio_bytes = audio_recorder(
text="",
recording_color="#e8b62c",
neutral_color="#6aa36f",
icon_name="microphone",
icon_size="2x",
)
if audio_bytes:
# Save the Recorded File
audio_location = "audio_file.wav"
with open(audio_location, "wb") as f:
f.write(audio_bytes)
if language == "English":
text=transcribe_text_to_voice_english(audio_location)
st.write(text)
else:
text,trans=transcribe_text_to_voice_hindi(audio_location)
st.write(text)
link_response = linkRAGhindi(text)
st.write("श्रेष्ठ:", link_response)
api_response = RAG(text)
st.write("श्रेष्ठ:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=rechindi(recctext)
st.write("श्रेष्ठ:", recc)
else:
# Text input option
text_input = st.text_area("आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं", "")
if st.button("एंटर"):
# Process the entered text
link_response = linkRAGhindi(text_input)
st.write("श्रेष्ठ:", link_response)
api_response = RAG(text_input)
st.write("श्रेष्ठ:", api_response)
# Read out the text response using tts
speech_file_path = 'audio_response.mp3'
text_to_speech_ai(speech_file_path, api_response)
st.audio(speech_file_path)
recctext="recommend top three other websites that could interest the user depending on this link and answer : " + link_response + api_response
recc=rechindi(recctext)
st.write("श्रेष्ठ:", recc)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader"
] | [((4762, 4803), 'streamlit.title', 'st.title', (['"""🚀 SHRESHTH 💬 Bhuvan Assistant"""'], {}), "('🚀 SHRESHTH 💬 Bhuvan Assistant')\n", (4770, 4803), True, 'import streamlit as st\n'), ((4853, 4900), 'streamlit.radio', 'st.radio', (['"""Language/भाषा"""', "['English', 'हिंदी']"], {}), "('Language/भाषा', ['English', 'हिंदी'])\n", (4861, 4900), True, 'import streamlit as st\n'), ((492, 534), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (523, 534), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((950, 992), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (981, 992), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1410, 1452), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1441, 1452), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1886, 1928), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1917, 1928), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2359, 2401), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2390, 2401), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2714, 2737), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (2720, 2737), False, 'from openai import OpenAI\n'), ((4000, 4049), 'requests.post', 'requests.post', (['url'], {'json': 'payload', 'headers': 'headers'}), '(url, json=payload, headers=headers)\n', (4013, 4049), False, 'import requests\n'), ((4315, 4338), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (4321, 4338), False, 'from openai import OpenAI\n'), ((4598, 4621), 'openai.OpenAI', 'OpenAI', ([], {'api_key': 'API_KEY'}), '(api_key=API_KEY)\n', (4604, 4621), False, 'from openai import OpenAI\n'), ((4989, 5040), 'streamlit.radio', 'st.radio', (['"""Select Mode Of Input"""', "['Voice', 'Text']"], {}), "('Select Mode Of Input', ['Voice', 'Text'])\n", (4997, 5040), True, 'import streamlit as st\n'), ((5044, 5157), 'streamlit.write', 'st.write', (['"""Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance"""'], {}), "(\n 'Smart - Helpful - Robust - Effortless - System for Text-to-speech and Human-like Assistance'\n )\n", (5052, 5157), True, 'import streamlit as st\n'), ((7402, 7456), 'streamlit.radio', 'st.radio', (['"""इनपुट मोड का चयन करें"""', "['आवाज', 'टेक्स्ट']"], {}), "('इनपुट मोड का चयन करें', ['आवाज', 'टेक्स्ट'])\n", (7410, 7456), True, 'import streamlit as st\n'), ((7461, 7568), 'streamlit.write', 'st.write', (['"""स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता"""'], {}), "(\n 'स्मार्ट - सहायक - मजबूत - प्रयासहीन - पाठ-से-बोल के लिए एक सिस्टम और मानव जैसी सहायता'\n )\n", (7469, 7568), True, 'import streamlit as st\n'), ((5198, 5324), 'streamlit.write', 'st.write', (['"""Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!"""'], {}), "(\n 'Click on the voice recorder and let me know how I can help you today with your Queries Regarding Bhuvan!'\n )\n", (5206, 5324), True, 'import streamlit as st\n'), ((5337, 5456), 'audio_recorder_streamlit.audio_recorder', 
'audio_recorder', ([], {'text': '""""""', 'recording_color': '"""#e8b62c"""', 'neutral_color': '"""#6aa36f"""', 'icon_name': '"""microphone"""', 'icon_size': '"""2x"""'}), "(text='', recording_color='#e8b62c', neutral_color='#6aa36f',\n icon_name='microphone', icon_size='2x')\n", (5351, 5456), False, 'from audio_recorder_streamlit import audio_recorder\n'), ((6619, 6675), 'streamlit.text_area', 'st.text_area', (['"""Enter your text here and press Enter"""', '""""""'], {}), "('Enter your text here and press Enter', '')\n", (6631, 6675), True, 'import streamlit as st\n'), ((6687, 6706), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (6696, 6706), True, 'import streamlit as st\n'), ((7629, 7765), 'streamlit.write', 'st.write', (['"""आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!"""'], {}), "(\n 'आवाज रेकॉर्डर पर क्लिक करें और मुझसे यह बताएं कि आज आपकी भुवन से संबंधित सवालों में मैं आपकी कैसे मदद कर सकता हूँ!'\n )\n", (7637, 7765), True, 'import streamlit as st\n'), ((7778, 7897), 'audio_recorder_streamlit.audio_recorder', 'audio_recorder', ([], {'text': '""""""', 'recording_color': '"""#e8b62c"""', 'neutral_color': '"""#6aa36f"""', 'icon_name': '"""microphone"""', 'icon_size': '"""2x"""'}), "(text='', recording_color='#e8b62c', neutral_color='#6aa36f',\n icon_name='microphone', icon_size='2x')\n", (7792, 7897), False, 'from audio_recorder_streamlit import audio_recorder\n'), ((9118, 9182), 'streamlit.text_area', 'st.text_area', (['"""आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं"""', '""""""'], {}), "('आप यहाँ अपना टेक्स्ट दर्ज करें और एंटर दबाएं', '')\n", (9130, 9182), True, 'import streamlit as st\n'), ((9194, 9211), 'streamlit.button', 'st.button', (['"""एंटर"""'], {}), "('एंटर')\n", (9203, 9211), True, 'import streamlit as st\n'), ((439, 467), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db3"""'], {}), "('db3')\n", (460, 467), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((897, 925), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db1"""'], {}), "('db1')\n", (918, 925), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1357, 1385), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db2"""'], {}), "('db2')\n", (1378, 1385), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((1833, 1861), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db1"""'], {}), "('db1')\n", (1854, 1861), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((2306, 2334), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""db2"""'], {}), "('db2')\n", (2327, 2334), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader\n'), ((6045, 6081), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'link_response'], {}), "('SHRESHTH:', link_response)\n", (6053, 6081), True, 'import streamlit as st\n'), ((6131, 6166), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'api_response'], {}), "('SHRESHTH:', api_response)\n", (6139, 6166), True, 'import streamlit as st\n'), ((6293, 6319), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (6301, 6319), True, 'import streamlit as st\n'), ((6531, 6558), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'recc'], {}), "('SHRESHTH:', recc)\n", (6539, 6558), True, 'import streamlit as st\n'), ((6814, 6850), 'streamlit.write', 'st.write', 
(['"""SHRESHTH:"""', 'link_response'], {}), "('SHRESHTH:', link_response)\n", (6822, 6850), True, 'import streamlit as st\n'), ((6906, 6941), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'api_response'], {}), "('SHRESHTH:', api_response)\n", (6914, 6941), True, 'import streamlit as st\n'), ((7119, 7145), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (7127, 7145), True, 'import streamlit as st\n'), ((7357, 7384), 'streamlit.write', 'st.write', (['"""SHRESHTH:"""', 'recc'], {}), "('SHRESHTH:', recc)\n", (7365, 7384), True, 'import streamlit as st\n'), ((8483, 8518), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'link_response'], {}), "('श्रेष्ठ:', link_response)\n", (8491, 8518), True, 'import streamlit as st\n'), ((8568, 8602), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'api_response'], {}), "('श्रेष्ठ:', api_response)\n", (8576, 8602), True, 'import streamlit as st\n'), ((8786, 8812), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (8794, 8812), True, 'import streamlit as st\n'), ((9018, 9044), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'recc'], {}), "('श्रेष्ठ:', recc)\n", (9026, 9044), True, 'import streamlit as st\n'), ((9317, 9352), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'link_response'], {}), "('श्रेष्ठ:', link_response)\n", (9325, 9352), True, 'import streamlit as st\n'), ((9408, 9442), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'api_response'], {}), "('श्रेष्ठ:', api_response)\n", (9416, 9442), True, 'import streamlit as st\n'), ((9625, 9651), 'streamlit.audio', 'st.audio', (['speech_file_path'], {}), '(speech_file_path)\n', (9633, 9651), True, 'import streamlit as st\n'), ((9857, 9883), 'streamlit.write', 'st.write', (['"""श्रेष्ठ:"""', 'recc'], {}), "('श्रेष्ठ:', recc)\n", (9865, 9883), True, 'import streamlit as st\n'), ((5846, 5860), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (5854, 5860), True, 'import streamlit as st\n'), ((5967, 5981), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (5975, 5981), True, 'import streamlit as st\n'), ((8283, 8297), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (8291, 8297), True, 'import streamlit as st\n'), ((8404, 8418), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (8412, 8418), True, 'import streamlit as st\n')] |
import json
import logging
import os
import re
import time
import xml.etree.ElementTree as ET
from collections import defaultdict, deque
from functools import partial
import backoff
import llama_index
import markdown
import openai
import tiktoken
from colorama import Fore
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex
from llama_index.indices.composability import ComposableGraph
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import StorageContext, load_index_from_storage, ServiceContext
from llama_index import (
VectorStoreIndex,
ResponseSynthesizer,
)
from api.mygene_api import mygene_api
from api.pubmed_api import pubmed_api
from api.myvariant_api import myvariant_api
from config import OPENAI_API_KEY
logging.getLogger("llama_index").setLevel(logging.WARNING)
#file_handler = logging.FileHandler('utils.log')
# Configure the logging settings
#logging.basicConfig(level=logging.INFO, handlers=[file_handler])
MAX_TOKENS = 4097
api_info_mapping = {"mygene": mygene_api, "PubMed": pubmed_api, "myvariant": myvariant_api}
api_key = OPENAI_API_KEY or os.environ["OPENAI_API_KEY"]
openai.api_key = api_key
def get_input(prompt, type_=None, min_=None, max_=None, range_=None):
if min_ is not None and max_ is not None and max_ < min_:
raise ValueError("min_ must be less than or equal to max_.")
while True:
ui = input(prompt)
if type_ is not None:
try:
ui = type_(ui)
except ValueError:
print(f"Input type must be {type_.__name__}!")
continue
if max_ is not None and ui > max_:
print(f"Input must be less than or equal to {max_}.")
elif min_ is not None and ui < min_:
print(f"Input must be greater than or equal to {min_}.")
elif range_ is not None and ui not in range_:
if isinstance(range_, range):
template = "Input must be between {} and {}."
print(template.format(range_.start, range_.stop))
else:
template = "Input must be {}."
print(template.format(", ".join(map(str, range_))))
else:
return ui
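# Let the user pick the next task from the queue, or enter 0 to write a custom task.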
def select_task(task_list):
# Task list is actually a Queue
task_list = list(task_list)
print('\n\n')
choice = get_input(Fore.LIGHTGREEN_EX + "\033[1mWhich task would you like to execute? Type 0 to create your own task! \033[0m", type_=int, min_=0, max_=len(task_list))
if choice == 0:
task = input(Fore.LIGHTGREEN_EX + "\033[1mWrite your task! \033[0m")
else:
task = task_list.pop(choice - 1)
return task, deque(task_list)
def num_tokens_from_string(string: str, encoding_name: str = "gpt2") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_key_results(index, objective, top_k=20, additional_queries=[]):
"""Run final queries over retrieved documents and store in doc_store."""
if not index.docstore.docs:
print(Fore.RED + "\033[1m\n! WARNING: NO TASKS RETURNED RESULTS. PLEASE TWEAK YOUR OBJECTIVE AND CHECK SPELLING. !\n\033[0m")
return []
print(Fore.CYAN + "\033[1m\n*****COMPILING KEY RESULTS*****\n\033[0m")
key_results = []
queries = [
"Give a brief high level summary of all the data.",
"Briefly list all the main points that the data covers.",
"Generate several creative hypotheses given the data.",
"What are some high level research directions to explore further given the data?",
f"Do your best to answer the objective: {objective} given the information.",
]
for query in queries:
print(Fore.CYAN + f"\nCOMPILING RESULT {query}\n")
res = None
try:
res, citation_data = query_knowledge_base(index=index, query=query, list_index=False, top_k=top_k)
except Exception as e:
print(f"Exception getting key result {query}, error {e}")
if res:
query = f"## {query}\n\n"
res_html = markdown.markdown(res)
res_citation = markdown.markdown(citation_data)
key_results.append((query, f"{res_html}\n\n### Citations\n\n{res_citation}\n\n"))
print(Fore.CYAN + f"\nRESULTS COMPILED. SAVED TO DIRECTORY `out`\n")
return key_results
def get_max_completion_len(prompt):
tokens = num_tokens_from_string(prompt)
return MAX_TOKENS - tokens
def execute_python(code: str):
# ret is defined in the code string
loc = {}
try:
exec(code, globals(), loc)
except Exception as e:
print(f"Exception executing code {code}, {e}")
return
return loc["ret"]
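# Flatten the fields of interest from each myvariant.info hit into a short text block.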
def process_myvariant_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
for result in results:
variant_name = result.get("_id")
gene_affected = result.get("cadd", {}).get("gene", {}).get("genename")
consequence = result.get("cadd", {}).get("consequence")
cadd_score = result.get("cadd", {}).get("phred")
rsid = result.get("dbsnp", {}).get("rsid")
variant_data = ""
citation_data = ""
if variant_name:
variant_data += f"Variant Name: {variant_name}\n"
if gene_affected:
variant_data += f"Gene Affected: {gene_affected}\n"
if consequence:
variant_data += f"Consequence: {consequence}\n"
if cadd_score is not None:
variant_data += f"CADD Score: {cadd_score}\n"
if rsid:
variant_data += f"rsID: {rsid}\n"
processed_result.append((variant_data,{"citation_data": citation_data}))
return processed_result
def process_mygene_result(results):
processed_result = []
if not isinstance(results, list):
results = [results]
# Each result will be split into 2 documents: summary and pathway
for json_data in results:
name = json_data.get("name")
refseq_genomic = json_data.get("refseq", {}).get("genomic", [])
refseq_rna = json_data.get("refseq", {}).get("rna", [])
symbol = json_data.get("symbol")
taxid = json_data.get("taxid")
type_of_gene = json_data.get("type_of_gene")
pos = json_data.get("genomic_pos_hg19")
summary = json_data.get("summary")
generif = json_data.get("generif")
output_summary = ""
citation_data = ""
# Summary
if name:
output_summary += f"Gene Name: {name}\n"
if refseq_genomic:
output_summary += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_summary += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if symbol:
output_summary += f"Symbol: {symbol}\n"
if taxid:
output_summary += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != 'unknown':
output_summary += f"Type of gene: {type_of_gene}\n"
if pos:
output_summary += f"Position: {pos}\n"
if summary:
output_summary += f"Summary of {name}: {summary}\n"
else:
# If not summary, use generifs.
if generif:
# Take 20 rifs max. Some genes have hundreds of rifs and the results size explodes.
for rif in generif[:20]:
pubmed = rif.get("pubmed")
text = rif.get("text")
if text:
output_summary += text
if pubmed:
citation_data += f" Pubmed ID: {pubmed}"
output_summary = output_summary.strip()
#logging.info(f"Mygene Summary result {name}, length is {str(len(output_summary))}")
if output_summary:
processed_result.append((output_summary, {"citation_data": citation_data}))
# Pathway
pathway = json_data.get("pathway")
if pathway:
kegg = pathway.get("kegg", [])
pid = pathway.get("pid", [])
reactome = pathway.get("reactome", [])
wikipathways = pathway.get("wikipathways", [])
netpath = pathway.get("netpath", [])
biocarta = pathway.get("biocarta", [])
pathway_elements = {"kegg": kegg, "pid": pid, "reactome": reactome, "wikipathways": wikipathways, "netpath": netpath, "biocarta": biocarta}
# mygene returns dicts instead of lists if singleton
# Wrap with list if not list
for k,v in pathway_elements.items():
if type(v) is not list:
pathway_elements[k] = [v]
output_pathway = ""
citation_data = ""
if name:
output_pathway += f"Gene Name: {name}\n"
if symbol:
output_pathway += f"Symbol: {symbol}\n"
if taxid:
output_pathway += f"Tax ID: {taxid}\n"
if type_of_gene and type_of_gene != 'unknown':
output_pathway += f"Type of gene: {type_of_gene}\n"
if refseq_genomic:
output_pathway += f"RefSeq genomic: {', '.join(refseq_genomic)}\n"
if refseq_rna:
output_pathway += f"RefSeq rna: {', '.join(refseq_rna)}\n"
if pos:
output_pathway += f"Position: {pos}\n"
output_pathway += f"PATHWAYS\n\n"
for k,v in pathway_elements.items():
output_pathway += f"\n{k}:\n"
for item in v:
output_pathway += f" ID: {item.get('id', '')}"
output_pathway += f" Name: {item.get('name', '')}"
#logging.info(f"Mygene Pathway result {name}, length is {len(output_pathway)}")
output_pathway = output_pathway.strip()
if output_pathway:
processed_result.append((output_pathway,{"citation_data": citation_data}))
return processed_result
def process_pubmed_result(result):
try:
root = ET.fromstring(result)
except Exception as e:
print(f"Cannot parse pubmed result, expected xml. {e}")
print("Adding whole document. Note this will lead to suboptimal results.")
return result if isinstance(result, list) else [result]
processed_result = []
for article in root:
res_ = ""
citation_data = ""
for title in article.iter("Title"):
res_ += f"{title.text}\n"
citation_data += f"{title.text}\n"
for abstract in article.iter("AbstractText"):
res_ += f"{abstract.text}\n"
for author in article.iter("Author"):
try:
citation_data += f"{author.find('LastName').text}"
citation_data += f", {author.find('ForeName').text}\n"
except:
pass
for journal in article.iter("Journal"):
res_ += f"{journal.find('Title').text}\n"
citation_data += f"{journal.find('Title').text}\n"
for volume in article.iter("Volume"):
citation_data += f"{volume.text}\n"
for issue in article.iter("Issue"):
citation_data += f"{issue.text}\n"
for pubdate in article.iter("PubDate"):
try:
year = pubdate.find("Year").text
citation_data += f"{year}"
month = pubdate.find("Month").text
citation_data += f"-{month}"
day = pubdate.find("Day").text
citation_data += f"-{day}\n"
except:
pass
for doi in article.iter("ELocationID"):
if doi.get("EIdType") == "doi":
res_ += f"{doi.text}\n"
if res_:
processed_result.append((res_,{"citation_data": citation_data}))
return processed_result
def get_code_params(code: str, preparam_text: str, postparam_text: str):
l = len(preparam_text)
preparam_index = code.find(preparam_text)
postparam_index = code.find(postparam_text)
if preparam_index == -1 or postparam_index == -1:
return
params = code[preparam_index + l : postparam_index].strip()
if params == "":
return
return params
def validate_llm_response(goal, response):
validation_prompt = f"I gave an LLM this goal: '{goal}' and it gave this response: '{response}'. Is this reasonable, or did something go wrong? [yes|no]"
validation_response = (
openai.Completion.create(
engine="text-davinci-003", prompt=validation_prompt, temperature=0.0
)
.choices[0]
.text.strip()
)
if validation_response.lower() == "yes":
return True
else:
return False
def generate_tool_prompt(task):
if "MYVARIANT" in task:
api_name = "myvariant"
elif "MYGENE" in task:
api_name = "mygene"
elif "PUBMED" in task:
api_name = "PubMed"
else:
print(f"Error. Tool not found in task: {task}")
return None
api_info = api_info_mapping[api_name]
prompt = f"""You have access to query the {api_name} API. If a task starts with '{api_name.upper()}:' then you should create the code to query the {api_name} API based off the documentation and return the code to complete your task. If you use the {api_name} API, do not answer with words, simply write the parameters used to call the function then cease output. Be sure it is valid python that will execute in a python interpreter.
---
Here is the {api_name} documentation
{api_info}
---
You should change the parameters to fit your specific task.
""".strip()
return prompt
def get_ada_embedding(text):
ada_embedding_max_size = 8191
text = text.replace("\n", " ")
if num_tokens_from_string(text) > ada_embedding_max_size:
# There must be a better way to do this.
text = text[:ada_embedding_max_size]
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
"data"
][0]["embedding"]
def insert_doc_llama_index(index, doc_id, data, metadata={}, embedding=None):
if not embedding:
embedding = get_ada_embedding(data)
doc = Document(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)
doc.excluded_llm_metadata_keys = ['citation_data']
doc.excluded_embed_metadata_keys = ['citation_data']
index.insert(doc)
def handle_python_result(result, cache, task, doc_store, doc_store_task_key):
results_returned = True
params = result
doc_store["tasks"][doc_store_task_key]["result_code"] = result
tool = task.split(":")[0]
if tool == "MYGENE":
result = "from api.mygene_wrapper import mygene_wrapper\n" + result + "\nret = mygene_wrapper(query_term, size, from_)"
elif tool == "MYVARIANT":
result = "from api.myvariant_wrapper import myvariant_wrapper\n" + result + "\nret = myvariant_wrapper(query_term)"
elif tool == "PUBMED":
result = "from api.pubmed_wrapper import pubmed_wrapper\n" + result + "\nret = pubmed_wrapper(query_term, retmax, retstart)"
executed_result = execute_python(result)
if type(executed_result) is list:
executed_result = list(filter(lambda x : x, executed_result))
    if (executed_result is not None) and (not executed_result): # Execution completed successfully, but the executed result was an empty list
results_returned = False
result = "NOTE: Code returned no results\n\n" + result
print(Fore.BLUE + f"\nTask '{task}' completed but returned no results")
if "MYVARIANT" in task:
if results_returned:
cache["MYVARIANT"].append(f"---\n{params}---\n")
else:
cache["MYVARIANT"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_myvariant_result(executed_result)
if "MYGENE" in task:
if results_returned:
cache["MYGENE"].append(f"---\n{params}---\n")
else:
cache["MYGENE"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_mygene_result(executed_result)
if "PUBMED" in task:
if results_returned:
cache["PUBMED"].append(f"---\n{params}---\n")
else:
cache["PUBMED"].append(f"---\nNote: This call returned no results\n{params}---\n")
processed_result = process_pubmed_result(executed_result)
if executed_result is None:
result = "NOTE: Code did not run succesfully\n\n" + result
print(Fore.BLUE + f"Task '{task}' failed. Code {result} did not run succesfully.")
if "MYGENE" in task:
cache["MYGENE"].append(f"---\nNote: This call did not run succesfully\n{params}---\n")
if "PUBMED" in task:
cache["PUBMED"].append(f"---\nNote: This call did not run succesfully\n{params}---\n")
if "MYVARIANT" in task:
cache["MYVARIANT"].append(f"---\nNote: This call did not run succesfully\n{params}---\n")
return
return processed_result
def handle_results(result, index, doc_store, doc_store_key, task_id_counter, RESULT_CUTOFF):
for i, r in enumerate(result):
res, metadata = r[0], r[1]
res = str(res)[
:RESULT_CUTOFF
] # Occasionally an enormous result will slow the program to a halt. Not ideal to lose results but putting in place for now.
vectorized_data = get_ada_embedding(res)
task_id = f"doc_id_{task_id_counter}_{i}"
insert_doc_llama_index(index=index, doc_id=task_id, data=res, metadata=metadata, embedding=vectorized_data)
doc_store["tasks"][doc_store_key]["results"].append(
{
"task_id_counter": task_id_counter,
"vectorized_data": vectorized_data,
"output": res,
"metadata": metadata,
}
)
def query_knowledge_base(
index,
query="Give a detailed but terse overview of all the information. Start with a high level summary and then go into details. Do not include any further instruction. Do not include filler words.",
response_mode="tree_summarize",
top_k=50,
list_index=False
):
if not index.docstore.docs:
print(Fore.RED + "NO INFORMATION IN LLAMA INDEX")
return
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = ResponseSynthesizer.from_args(
response_mode="tree_summarize",
)
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
if list_index:
query_response = index.query(
query, response_mode="default"
)
else:
# From llama index docs: Empirically, setting response_mode="tree_summarize" also leads to better summarization results.
query_response = query_engine.query(query)
extra_info = ""
if query_response.metadata:
try:
extra_info = [x.get("citation_data") for x in query_response.metadata.values()]
if not any(extra_info):
extra_info = []
except Exception as e:
print("Issue getting extra info from llama index")
return query_response.response, '\n\n'.join(extra_info)
def create_index(api_key,summaries=[], temperature=0.0, model_name="gpt-3.5-turbo-16k", max_tokens=6000):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
documents = []
for i, summary in enumerate(summaries):
doc = Document(text=summary, doc_id=str(i))
doc.excluded_llm_metadata_keys = ['citation_data']
doc.excluded_embed_metadata_keys = ['citation_data']
documents.append(doc)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=4000)
return GPTVectorStoreIndex(documents, service_context=service_context)
def create_graph_index(api_key, indicies=[], summaries=[], temperature=0.0, model_name="text-davinci-003", max_tokens=2000):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
graph = ComposableGraph.from_indices(
GPTListIndex,
indicies,
index_summaries=summaries,
service_context=service_context
)
return graph
def create_list_index(api_key, summaries=[], temperature=0.0, model_name="text-davinci-003", max_tokens=2000):
llm_predictor = LLMPredictor(
llm=OpenAI(
temperature=temperature,
openai_api_key=api_key,
model_name=model_name,
max_tokens=max_tokens,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
documents = []
for i, summary in enumerate(summaries):
documents.append(Document(text=summary, doc_id=str(i)))
index = GPTListIndex.from_documents(documents, service_context=service_context)
return index
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.ServiceUnavailableError, openai.error.Timeout),
)
def get_gpt_completion(
prompt,
temp=0.0,
engine="text-davinci-003",
top_p=1,
frequency_penalty=0,
presence_penalty=0,
):
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=get_max_completion_len(prompt),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
)
return response.choices[0].text.strip()
@backoff.on_exception(
partial(backoff.expo, max_value=50),
(openai.error.RateLimitError, openai.error.APIError, openai.error.APIConnectionError, openai.error.ServiceUnavailableError, openai.error.Timeout),
)
def get_gpt_chat_completion(
system_prompt, user_prompt, model="gpt-3.5-turbo-16k", temp=0.0
):
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
temperature=temp,
)
return response.choices[0]["message"]["content"].strip()
### FILE UTILS ###
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def write_file(path, contents, mode="w"):
with open(path, mode) as f:
f.write(contents)
def read_file(path, mode="r"):
with open(path, mode) as f:
contents = f.read()
if not contents:
print(f"WARNING: file {path} empty")
return contents
def sanitize_dir_name(dir_name):
# Remove invalid characters
dir_name = re.sub(r'[<>:"/\|?*]', '_', dir_name)
dir_name = dir_name.replace(' ', '_')
# Remove leading period
if dir_name.startswith('.'):
dir_name = dir_name[1:]
return dir_name
def save(
index,
doc_store,
OBJECTIVE,
current_datetime,
task_id_counter,
task_list,
completed_tasks,
cache,
reload_count,
summaries,
):
# Make basepath.
path = os.path.join("./out", sanitize_dir_name(OBJECTIVE) + "_" + current_datetime)
make_dir(path)
# Save llama index.
index.storage_context.persist(persist_dir=os.path.join(path, "index.json"))
# Save program state.
state = {
"summaries": summaries,
"reload_count": reload_count,
"task_id_counter": task_id_counter,
"task_list": list(task_list),
"completed_tasks": completed_tasks,
"cache": dict(cache),
"current_datetime": current_datetime,
"objective": OBJECTIVE,
}
with open(os.path.join(path, "state.json"), "w") as outfile:
json.dump(state, outfile)
# Save results.
if "key_results" in doc_store:
if reload_count:
new_time = str(time.strftime("%Y-%m-%d_%H-%M-%S"))
header = f"# {OBJECTIVE}\nDate: {new_time}\n\n"
else:
header = f"# {OBJECTIVE}\nDate: {current_datetime}\n\n"
key_findings_path = os.path.join(path, f"key_findings_{reload_count}.md")
write_file(key_findings_path, header, mode="a+")
for res in doc_store["key_results"]:
content = f"{res[0]}{res[1]}"
write_file(key_findings_path, content, mode="a+")
for task, doc in doc_store["tasks"].items():
doc_path = os.path.join(path, task)
make_dir(doc_path)
result_path = os.path.join(doc_path, "results")
make_dir(result_path)
if "executive_summary" in doc:
write_file(os.path.join(result_path, "executive_summary.txt"), doc["executive_summary"])
if "result_code" in doc:
write_file(os.path.join(result_path, "api_call.txt"), doc["result_code"])
for i, result in enumerate(doc["results"]):
result_path_i = os.path.join(result_path, str(i))
make_dir(result_path_i)
write_file(os.path.join(result_path_i, "output.txt"), result["output"])
write_file(
os.path.join(result_path_i, "vector.txt"),
str(result["vectorized_data"]),
)
def load(path):
llm_predictor = LLMPredictor(
llm=ChatOpenAI(
temperature=0,
openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k",
max_tokens=6000,
)
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=4000)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=os.path.join(path, "index.json"))
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
state_path = os.path.join(path, "state.json")
if os.path.exists(state_path):
with open(state_path, "r") as f:
json_data = json.load(f)
try:
reload_count = json_data["reload_count"] + 1
task_id_counter = json_data["task_id_counter"]
task_list = json_data["task_list"]
completed_tasks = json_data["completed_tasks"]
cache = defaultdict(list, json_data["cache"])
current_datetime = json_data["current_datetime"]
objective = json_data["objective"]
summaries = json_data["summaries"]
except KeyError as e:
raise Exception(
f"Missing key '{e.args[0]}' in JSON file at path '{state_path}'"
)
return (
index,
task_id_counter,
deque(task_list),
completed_tasks,
cache,
current_datetime,
objective,
reload_count,
summaries,
)
| [
"llama_index.GPTListIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.indices.composability.ComposableGraph.from_indices",
"llama_index.GPTVectorStoreIndex",
"llama_index.ResponseSynthesizer.from_args",
"llama_index.load_index_from_storage",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.Document"
] | [((3024, 3060), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (3045, 3060), False, 'import tiktoken\n'), ((14555, 14629), 'llama_index.Document', 'Document', ([], {'text': 'data', 'embedding': 'embedding', 'doc_id': 'doc_id', 'metadata': 'metadata'}), '(text=data, embedding=embedding, doc_id=doc_id, metadata=metadata)\n', (14563, 14629), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((18745, 18802), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (18765, 18802), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((18892, 18953), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {'response_mode': '"""tree_summarize"""'}), "(response_mode='tree_summarize')\n", (18921, 18953), False, 'from llama_index import VectorStoreIndex, ResponseSynthesizer\n'), ((19017, 19106), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (19037, 19106), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((20428, 20502), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (20456, 20502), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((20514, 20577), 'llama_index.GPTVectorStoreIndex', 'GPTVectorStoreIndex', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (20533, 20577), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((20944, 21001), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (20972, 21001), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((21016, 21133), 'llama_index.indices.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'indicies'], {'index_summaries': 'summaries', 'service_context': 'service_context'}), '(GPTListIndex, indicies, index_summaries=\n summaries, service_context=service_context)\n', (21044, 21133), False, 'from llama_index.indices.composability import ComposableGraph\n'), ((21537, 21594), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (21565, 21594), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((21743, 21814), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (21770, 21814), False, 'from llama_index import Document, GPTVectorStoreIndex, LLMPredictor, ServiceContext, GPTListIndex\n'), ((21861, 21896), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (21868, 21896), False, 'from functools import partial\n'), ((22854, 23020), 'openai.ChatCompletion.create', 
'openai.ChatCompletion.create', ([], {'model': 'model', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n user_prompt}]", 'temperature': 'temp'}), "(model=model, messages=[{'role': 'system',\n 'content': system_prompt}, {'role': 'user', 'content': user_prompt}],\n temperature=temp)\n", (22882, 23020), False, 'import openai\n'), ((22549, 22584), 'functools.partial', 'partial', (['backoff.expo'], {'max_value': '(50)'}), '(backoff.expo, max_value=50)\n', (22556, 22584), False, 'from functools import partial\n'), ((23607, 23644), 're.sub', 're.sub', (['"""[<>:"/\\\\|?*]"""', '"""_"""', 'dir_name'], {}), '(\'[<>:"/\\\\|?*]\', \'_\', dir_name)\n', (23613, 23644), False, 'import re\n'), ((26372, 26446), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(4000)'}), '(llm_predictor=llm_predictor, chunk_size=4000)\n', (26400, 26446), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((26588, 26682), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (26611, 26682), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((26695, 26727), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (26707, 26727), False, 'import os\n'), ((26735, 26761), 'os.path.exists', 'os.path.exists', (['state_path'], {}), '(state_path)\n', (26749, 26761), False, 'import os\n'), ((935, 967), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (952, 967), False, 'import logging\n'), ((2855, 2871), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (2860, 2871), False, 'from collections import defaultdict, deque\n'), ((10387, 10408), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['result'], {}), '(result)\n', (10400, 10408), True, 'import xml.etree.ElementTree as ET\n'), ((23194, 23214), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (23208, 23214), False, 'import os\n'), ((23224, 23241), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (23235, 23241), False, 'import os\n'), ((24651, 24676), 'json.dump', 'json.dump', (['state', 'outfile'], {}), '(state, outfile)\n', (24660, 24676), False, 'import json\n'), ((24991, 25044), 'os.path.join', 'os.path.join', (['path', 'f"""key_findings_{reload_count}.md"""'], {}), "(path, f'key_findings_{reload_count}.md')\n", (25003, 25044), False, 'import os\n'), ((25334, 25358), 'os.path.join', 'os.path.join', (['path', 'task'], {}), '(path, task)\n', (25346, 25358), False, 'import os\n'), ((25408, 25441), 'os.path.join', 'os.path.join', (['doc_path', '"""results"""'], {}), "(doc_path, 'results')\n", (25420, 25441), False, 'import os\n'), ((27558, 27574), 'collections.deque', 'deque', (['task_list'], {}), '(task_list)\n', (27563, 27574), False, 'from collections import defaultdict, deque\n'), ((4361, 4383), 'markdown.markdown', 'markdown.markdown', (['res'], {}), '(res)\n', (4378, 4383), False, 'import markdown\n'), ((4411, 4443), 'markdown.markdown', 'markdown.markdown', (['citation_data'], {}), '(citation_data)\n', (4428, 4443), False, 'import markdown\n'), ((19965, 20075), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 
'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (19975, 20075), False, 'from langchain.chat_models import ChatOpenAI\n'), ((20751, 20857), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (20757, 20857), False, 'from langchain import OpenAI\n'), ((21344, 21450), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'openai_api_key': 'api_key', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, openai_api_key=api_key, model_name=\n model_name, max_tokens=max_tokens)\n', (21350, 21450), False, 'from langchain import OpenAI\n'), ((24193, 24225), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (24205, 24225), False, 'import os\n'), ((24592, 24624), 'os.path.join', 'os.path.join', (['path', '"""state.json"""'], {}), "(path, 'state.json')\n", (24604, 24624), False, 'import os\n'), ((26186, 26289), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'api_key', 'model_name': '"""gpt-3.5-turbo-16k"""', 'max_tokens': '(6000)'}), "(temperature=0, openai_api_key=api_key, model_name=\n 'gpt-3.5-turbo-16k', max_tokens=6000)\n", (26196, 26289), False, 'from langchain.chat_models import ChatOpenAI\n'), ((26541, 26573), 'os.path.join', 'os.path.join', (['path', '"""index.json"""'], {}), "(path, 'index.json')\n", (26553, 26573), False, 'import os\n'), ((26828, 26840), 'json.load', 'json.load', (['f'], {}), '(f)\n', (26837, 26840), False, 'import json\n'), ((14291, 14360), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': '[text]', 'model': '"""text-embedding-ada-002"""'}), "(input=[text], model='text-embedding-ada-002')\n", (14314, 14360), False, 'import openai\n'), ((24785, 24819), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%H-%M-%S"""'], {}), "('%Y-%m-%d_%H-%M-%S')\n", (24798, 24819), False, 'import time\n'), ((25535, 25585), 'os.path.join', 'os.path.join', (['result_path', '"""executive_summary.txt"""'], {}), "(result_path, 'executive_summary.txt')\n", (25547, 25585), False, 'import os\n'), ((25669, 25710), 'os.path.join', 'os.path.join', (['result_path', '"""api_call.txt"""'], {}), "(result_path, 'api_call.txt')\n", (25681, 25710), False, 'import os\n'), ((25916, 25957), 'os.path.join', 'os.path.join', (['result_path_i', '"""output.txt"""'], {}), "(result_path_i, 'output.txt')\n", (25928, 25957), False, 'import os\n'), ((26017, 26058), 'os.path.join', 'os.path.join', (['result_path_i', '"""vector.txt"""'], {}), "(result_path_i, 'vector.txt')\n", (26029, 26058), False, 'import os\n'), ((27121, 27158), 'collections.defaultdict', 'defaultdict', (['list', "json_data['cache']"], {}), "(list, json_data['cache'])\n", (27132, 27158), False, 'from collections import defaultdict, deque\n'), ((12834, 12933), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'validation_prompt', 'temperature': '(0.0)'}), "(engine='text-davinci-003', prompt=\n validation_prompt, temperature=0.0)\n", (12858, 12933), False, 'import openai\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler
from llama_index.legacy.callbacks.arize_phoenix_callback import (
arize_phoenix_callback_handler,
)
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.legacy.callbacks.open_inference_callback import (
OpenInferenceCallbackHandler,
)
from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.legacy
llama_index.legacy.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler"
] | [((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1360, 1375), False, 'from llama_index.legacy.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1433, 1478), 'llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.legacy.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1532, 1573), 'llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1558, 1573), False, 'from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1629, 1662), 'llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1647, 1662), False, 'from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1715, 1755), 'llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1740, 1755), False, 'from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1806, 1837), 'llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1822, 1837), False, 'from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1889, 1928), 'llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1913, 1928), False, 'from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler
from llama_index.legacy.callbacks.arize_phoenix_callback import (
arize_phoenix_callback_handler,
)
from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler
from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.legacy.callbacks.open_inference_callback import (
OpenInferenceCallbackHandler,
)
from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.legacy
llama_index.legacy.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler"
] | [((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1360, 1375), False, 'from llama_index.legacy.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1433, 1478), 'llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.legacy.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1532, 1573), 'llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1558, 1573), False, 'from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1629, 1662), 'llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1647, 1662), False, 'from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1715, 1755), 'llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1740, 1755), False, 'from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1806, 1837), 'llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1822, 1837), False, 'from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1889, 1928), 'llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1913, 1928), False, 'from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler\n')] |
import sys
from langchain import OpenAI
from pathlib import Path
import llama_index as li
#from llamahub.connectors import TextFileConnector
from llama_index import SimpleDirectoryReader,GPTListIndex,LLMPredictor
file_name = sys.argv[1]
llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo")) #temperature=0,
docs = SimpleDirectoryReader('.', [file_name]).load_data()
index = GPTListIndex(docs)
ex = """Today we finish off our study of collaborative filtering by looking closely at embeddings—a critical building block of many deep learning algorithms. Then we’ll dive into convolutional neural networks (CNNs) and see how they really work. We’ve used plenty of CNNs through this course, but we haven’t peeked inside them to see what’s really going on in there. As well as learning about their most fundamental building block, the convolution, we’ll also look at pooling, dropout, and more."""
q = f"""Here's an example of a lesson summary from a previous fast.ai lesson: "{ex}" Write a four paragraph summary of the fast.ai lesson contained in the following transcript, using a similar informal writing style to the above summary from the previous lesson."""
summary = index.query(q, response_mode="tree_summarize", llm_predictor=llm_predictor)
Path(f'{Path(file_name).stem}-summ.txt').write_text(str(summary))
| [
"llama_index.GPTListIndex",
"llama_index.SimpleDirectoryReader"
] | [((391, 409), 'llama_index.GPTListIndex', 'GPTListIndex', (['docs'], {}), '(docs)\n', (403, 409), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((271, 305), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (277, 305), False, 'from langchain import OpenAI\n'), ((331, 370), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""."""', '[file_name]'], {}), "('.', [file_name])\n", (352, 370), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((1270, 1285), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1274, 1285), False, 'from pathlib import Path\n')] |