| code (string, 161 to 233k chars) | apis (sequence, 1 to 24 items) | extract_api (string, 162 to 68.5k chars) |
| --- | --- | --- |
from io import BytesIO
from flask import Flask, request, jsonify, send_file
import os
# import tweepy
from dotenv import load_dotenv
import snscrape.modules.twitter as snstwitter
import requests
from goose3 import Goose
from wordcloud import WordCloud, STOPWORDS
import plotly.graph_objs as go
import json
import plotly
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import base64
import pandas as pd
import datetime
import plotly.express as px
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import logging
import sys
import io
import llama_index
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, TwitterTweetReader
from bs4 import BeautifulSoup
from urllib.parse import quote
import serpapi
from serpapi import GoogleSearch
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
from langchain.memory import ConversationBufferMemory
# from langchain.utilities import WikipediaAPIWrapper
from langchain.agents import load_tools, initialize_agent
from flask_cors import CORS
import seaborn as sns
app = Flask(__name__)
CORS(app)
twitterData = None
queryString = None
# print(type(twitterData))
load_dotenv()
print(os.getenv("HUGGINGFACE_API"))
print(os.getenv('OPENAI_API_KEY'))
print(os.getenv('BEARER_TOKEN'))
os.getenv('OPENAI_API_KEY')
@app.route('/')
def hello_geek():
    return '<h1>Hello from Flask & Docker</h1>'
@app.route('/twitter')
def twitter():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = set([])
i=0
global twitterData
global queryString
print("Url: Twitter, data: ", twitterData)
print("Url: Twitter, query: ", queryString)
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
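    # Aggregate like counts, retweet/quote counts, and hashtags over at most the
    # first 200 tweets returned for the query.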
for tweet in twitterData:
print("looping through tweets")
print(vars(tweet))
likecount += tweet.likeCount
retweet += tweet.retweetCount + tweet.quoteCount
if(tweet.hashtags != None):
for h in tweet.hashtags:
hashtags.add(h)
i+= 1
if(i==200):
break
tweets = {"likecount":likecount,"retweet":retweet,"hashtags":list(hashtags),"count":i}
print(tweets)
return jsonify({'result':tweets})
os.getenv('SERPAPI_API_KEY')
# For getting the related links - by providing the URL
@app.route('/search', methods=['GET'])
def search():
article_url = request.args.get('url')
response = requests.get(article_url)
    soup = BeautifulSoup(response.text, 'html.parser')
    header = soup.find('h1').text.strip()
search_query = quote(header)
params = {
'q': search_query,
'hl': 'en',
'gl': 'us',
'api_key': os.getenv('SERPAPI_API_KEY')
}
search = GoogleSearch(params)
results = search.get_dict().get('organic_results', [])
links = [result['link'] for result in results]
return jsonify({'article_header': header, 'related_links': links})
# To use LLM to check the factual accuracy of the news
@app.route('/classify_news', methods=['GET'])
def classify_news():
prompt = request.args['url']
tool_names = ["serpapi"]
tools = load_tools(tool_names)
title_template = PromptTemplate(
input_variables = ['topic'],
        template='Classify the news: {topic} into categories such as murder, fire, accident, natural disaster, etc.'
)
# script_template = PromptTemplate(
# input_variables = ['title', 'wikipedia_research'],
# #template='Look for the authenticity and the accuracy of the news listed: {title} provide the explanation of whether it is factually correct or is there any information present on wikipedia and also provide the correct answer or result if there is:{wikipedia_research} '
# template='Please verify the authenticity and accuracy of the news provided in the {title} by cross-referencing it with the corresponding {wikipedia_research} page. Examine the information available on Wikipedia and determine whether the news is factually correct or accurate. Additionally, if there is any conflicting or misleading information, please provide the correct answer or result based on your research from Wikipedia. '
# )
title_memory = ConversationBufferMemory(input_key='topic', memory_key='chat_history')
# script_memory = ConversationBufferMemory(input_key='title', memory_key='chat_history')
llm = OpenAI(temperature=0.9)
title_chain = LLMChain(llm=llm, prompt=title_template, verbose=True, output_key='title', memory=title_memory)
# script_chain = LLMChain(llm=llm, prompt=script_template, verbose=True, output_key='script', memory=script_memory)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
# wiki = WikipediaAPIWrapper()
if prompt:
title = title_chain.run(prompt)
        a = agent.run(f"{prompt}. Also, provide the related links")
return {
'title': title_memory.buffer,
'script': a
}
@app.route('/xyz')
def xyz():
query = request.args['query']
tweets = []
for tweet in snstwitter.TwitterProfileScraper(query).get_items():
tweets.append(tweet.date)
    return jsonify([str(d) for d in tweets])
API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
headers = {"Authorization": "Bearer " + os.getenv('HUGGINGFACE_API') }
API_URL_PROP = "https://api-inference.huggingface.co/models/valurank/distilroberta-propaganda-2class"
API_URL_HATE = "https://api-inference.huggingface.co/models/IMSyPP/hate_speech_en"
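# Thin wrappers around the Hugging Face Inference API: `query` summarizes text,
# `queryprop` scores how propagandistic a passage is, and `query_hate` classifies
# hate speech.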
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
def queryprop(payload):
response = requests.post(API_URL_PROP, headers=headers, json=payload)
return response.json()
def query_hate(payload):
response = requests.post(API_URL_HATE, headers=headers, json=payload)
return response.json()
@app.route('/sentiment')
def sentiment():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = []
senti=[]
i=0
positive=0
negative=0
neutral=0
global twitterData
global queryString
print("Url: Sentiment, data: ", twitterData)
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
for tweet in twitterData:
if tweet.lang=="en":
i+=1
if(i==200):
break
sentence= tweet.rawContent
print(sentence)
sid_obj = SentimentIntensityAnalyzer()
            sentiment_dict = sid_obj.polarity_scores(sentence)
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
positive+=1
elif sentiment_dict['compound'] <= -0.05 :
negative+=1
else :
neutral+=1
senti={"positive":positive, "negative":negative, "neutral":neutral}
labels = list(senti.keys())
values = list(senti.values())
data = {"Target": ["Positive","Negative", "Neutral"], "Value": [positive, negative, neutral]}
df = pd.DataFrame(data)
target=["Positive","Negative", "Neutral"]
value=[positive, negative, neutral]
# palette_color = sns.color_palette('bright')
#plt.pie(value, labels=target, colors=palette_color, autopct='%.0f%%')
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
plt.title("Sentiment Analysis on the Tweets related to Article")
plt.savefig('senti.png')
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
# # return {"labels":labels, "values":values}
return send_file("./senti.png", mimetype='image/png')
@app.route('/sentiment_article')
def sentiment_article():
senti=[]
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
sentence1 = articles.cleaned_text
sid_obj = SentimentIntensityAnalyzer()
    sentiment_dict = sid_obj.polarity_scores(sentence1)
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
senti.append("Positive")
elif sentiment_dict['compound'] <= -0.05 :
senti.append("Negative")
else :
senti.append("Neutral")
return jsonify({"result":senti,"pos":sentiment_dict})
@app.route('/summary')
def summary():
try:
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = query({
"inputs": articles.cleaned_text
})
print(output)
except:
        return "Please provide a valid article URL"
return jsonify({"result": output[0]['summary_text']})
@app.route('/cloud2')
def plotly_wordcloud2():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
url = articles.cleaned_text
wordcloud = WordCloud(width=1280, height=853, margin=0,
colormap='Blues').generate(url)
wordcloud.to_file("./wordcloud.png")
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
# print(base64_string)
# return base64_string
return send_file("./wordcloud.png", mimetype='image/png')
@app.route('/propaganda')
def propaganda():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = queryprop({
"inputs": articles.cleaned_text[0:600]
})
yes = output[0][0]['score']
no = 1 - yes
    data = {"Target": ["Propagandistic", "Non-Propagandistic"], "Value": [yes, no]}
df = pd.DataFrame(data)
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
    plt.title("Propagandistic Evaluation of the Article")
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
plt.savefig('propaganda.png')
# print(base64_string)
# return base64_string
return send_file("./propaganda.png", mimetype='image/png')
# return base64_string
# return jsonify({"yes": yes, "no": no})
@app.route("/chat", methods=["GET"])
def chat():
# Get the query from the request body.
query = request.args['url']
# create an app in https://developer.twitter.com/en/apps
# create reader, specify twitter handles
reader = TwitterTweetReader(os.getenv('BEARER_TOKEN'))
documents = reader.load_data(["ANI"])
documents1 = reader.load_data(["ZeeNews"])
documents2 = reader.load_data(["TV9Bharatvarsh"])
documents3 = reader.load_data(["Republic_Bharat"])
documents4 = reader.load_data(["AajTak"])
# Create a new instance of the llama chatbot agent.
agent = llama_index.GPTVectorStoreIndex.from_documents(documents1+documents+documents2+documents3+documents4)
chat_engine = agent.as_chat_engine(verbose=True)
# Get the response from the llama chatbot agent.
response = chat_engine.chat(query)
# Return the response as JSON.
    return jsonify({"response": str(response)})
@app.route('/hate-speech')
def hate():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
url = articles.cleaned_text
analyzer = SentimentIntensityAnalyzer()
# the object outputs the scores into a dict
sentiment_dict = analyzer.polarity_scores(url)
    if sentiment_dict['compound'] >= 0.05:
        category = "Positive"
    elif sentiment_dict['compound'] <= -0.05:
        category = "Negative"
    else:
        category = "Neutral"
    print(category)
    if category == "Negative":
        res = 'Hate Speech'
    else:
        res = 'Not Hate Speech'
return jsonify({"sentiment":category,"verdict":res})
@app.route('/multi-class')
def category():
url = request.args['url']
    # Print the input url.
print(url)
output=query_hate({
"inputs": [str(url)],
"keywords": ["LABEL_0", "LABEL_1", "LABEL_2", "LABEL_3"]})
# print(output[0])
result = {}
if url:
for data in output[0]:
if data['label'] == "LABEL_0":
result["ACCEPTABLE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_1":
result["INAPPROPRIATE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_2":
result["OFFENSIVE"] = round(data['score']*100, 2)
elif data['label'] == "LABEL_3":
result["VIOLENT"] = round(data['score']*100, 2)
labels = list(result.keys())
values = list(result.values())
data = {"Target":list(result.keys()) , "Value": list(result.values())}
df = pd.DataFrame(data)
sns.barplot(x="Target" , y="Value", data=df, palette="Set2")
    plt.title("Hate Speech Categories Detected in the Article")
# fig = plt.gcf()
# buf = io.BytesIO()
# fig.savefig(buf, format="png")
# buf.seek(0)
# base64_string = base64.b64encode(buf.read()).decode("utf-8")
plt.savefig('hate.png')
# print(base64_string)
# return base64_string
return send_file("./hate.png", mimetype='image/png')
# return jsonify({"result":result})
@app.route('/authenticity')
def auth():
url = request.args['url']
lis = []
df = pd.read_csv('blacklist.csv')
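    # The article is flagged as non-authentic when its URL contains any domain
    # listed in the MBFC column of the local blacklist.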
for i in range(len(df)):
lis.append(df.loc[i, "MBFC"])
for l in lis:
if(url.__contains__(l)):
return {"authentic":False}
return { "authentic": True }
@app.route('/bot-activity')
def botActivity():
url = request.args['url']
i=0
usernames = []
time = []
finalusername = []
for tweet in snstwitter.TwitterSearchScraper(url).get_items():
usernames.append(tweet.user.username)
time.append(tweet.date)
if(i==150):
break
i+=1
flag = False
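    # Heuristic: accounts whose tweets arrive within 60 seconds of the previous
    # tweet are collected as suspected bots; more than three such hits sets the flag.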
for i in range(len(time)-1):
a = time[i]
b = time[i+1]
c = a-b
        if c.total_seconds() <= 60:
finalusername.append(usernames[i+1])
print("username: ", finalusername)
if(len(finalusername) > 3):
flag = True
return jsonify({"bots":list(set(finalusername)),"flag":flag})
if __name__ == '__main__':
app.run(host="0.0.0.0",port=5000,debug=True)
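# Example usage (a sketch, assuming the server is running locally on port 5000 and
# the URLs/queries below are placeholders):
#   curl "http://localhost:5000/summary?url=https://example.com/article"
#   curl "http://localhost:5000/sentiment?query=%23breakingnews"
#   curl "http://localhost:5000/authenticity?url=https://example.com/article"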
| [
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((1520, 1535), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1525, 1535), False, 'from flask import Flask, jsonify\n'), ((1536, 1545), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1540, 1545), False, 'from flask_cors import CORS\n'), ((1613, 1626), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1624, 1626), False, 'from dotenv import load_dotenv\n'), ((1734, 1761), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1743, 1761), False, 'import os\n'), ((2726, 2754), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {}), "('SERPAPI_API_KEY')\n", (2735, 2754), False, 'import os\n'), ((1634, 1662), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (1643, 1662), False, 'import os\n'), ((1670, 1697), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1679, 1697), False, 'import os\n'), ((1705, 1730), 'os.getenv', 'os.getenv', (['"""BEARER_TOKEN"""'], {}), "('BEARER_TOKEN')\n", (1714, 1730), False, 'import os\n'), ((2699, 2726), 'flask.jsonify', 'jsonify', (["{'result': tweets}"], {}), "({'result': tweets})\n", (2706, 2726), False, 'from flask import request, jsonify\n'), ((2879, 2902), 'flask.request.args.get', 'request.args.get', (['"""url"""'], {}), "('url')\n", (2895, 2902), False, 'from flask import request, jsonify\n'), ((2918, 2943), 'requests.get', 'requests.get', (['article_url'], {}), '(article_url)\n', (2930, 2943), False, 'import requests\n'), ((2955, 2997), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.url', '"""html.parser"""'], {}), "(response.url, 'html.parser')\n", (2968, 2997), False, 'from bs4 import BeautifulSoup\n'), ((3059, 3072), 'urllib.parse.quote', 'quote', (['header'], {}), '(header)\n', (3064, 3072), False, 'from urllib.parse import quote\n'), ((3224, 3244), 'serpapi.GoogleSearch', 'GoogleSearch', (['params'], {}), '(params)\n', (3236, 3244), False, 'from serpapi import GoogleSearch\n'), ((3368, 3427), 'flask.jsonify', 'jsonify', (["{'article_header': header, 'related_links': links}"], {}), "({'article_header': header, 'related_links': links})\n", (3375, 3427), False, 'from flask import request, jsonify\n'), ((3627, 3649), 'langchain.agents.load_tools', 'load_tools', (['tool_names'], {}), '(tool_names)\n', (3637, 3649), False, 'from langchain.agents import load_tools\n'), ((3672, 3837), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['topic']", 'template': '"""To classify the news: {topic} in to the categories like murder, fire, accident, natural disaster, etc"""'}), "(input_variables=['topic'], template=\n 'To classify the news: {topic} in to the categories like murder, fire, accident, natural disaster, etc'\n )\n", (3686, 3837), False, 'from langchain.prompts import PromptTemplate\n'), ((4677, 4747), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'input_key': '"""topic"""', 'memory_key': '"""chat_history"""'}), "(input_key='topic', memory_key='chat_history')\n", (4701, 4747), False, 'from langchain.memory import ConversationBufferMemory\n'), ((4852, 4875), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (4858, 4875), False, 'from langchain.llms import OpenAI\n'), ((4895, 4994), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'title_template', 'verbose': '(True)', 'output_key': '"""title"""', 'memory': 'title_memory'}), "(llm=llm, prompt=title_template, verbose=True, output_key='title',\n 
memory=title_memory)\n", (4903, 4994), False, 'from langchain.chains import LLMChain, SequentialChain\n'), ((5123, 5202), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (5139, 5202), False, 'from langchain.agents import initialize_agent\n'), ((6021, 6074), 'requests.post', 'requests.post', (['API_URL'], {'headers': 'headers', 'json': 'payload'}), '(API_URL, headers=headers, json=payload)\n', (6034, 6074), False, 'import requests\n'), ((6136, 6194), 'requests.post', 'requests.post', (['API_URL_PROP'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_PROP, headers=headers, json=payload)\n', (6149, 6194), False, 'import requests\n'), ((6257, 6315), 'requests.post', 'requests.post', (['API_URL_HATE'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_HATE, headers=headers, json=payload)\n', (6270, 6315), False, 'import requests\n'), ((7658, 7676), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (7670, 7676), True, 'import pandas as pd\n'), ((7894, 7953), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (7905, 7953), True, 'import seaborn as sns\n'), ((7959, 8023), 'matplotlib.pyplot.title', 'plt.title', (['"""Sentiment Analysis on the Tweets related to Article"""'], {}), "('Sentiment Analysis on the Tweets related to Article')\n", (7968, 8023), True, 'import matplotlib.pyplot as plt\n'), ((8029, 8053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""senti.png"""'], {}), "('senti.png')\n", (8040, 8053), True, 'import matplotlib.pyplot as plt\n'), ((8567, 8613), 'flask.send_file', 'send_file', (['"""./senti.png"""'], {'mimetype': '"""image/png"""'}), "('./senti.png', mimetype='image/png')\n", (8576, 8613), False, 'from flask import send_file\n'), ((8742, 8749), 'goose3.Goose', 'Goose', ([], {}), '()\n', (8747, 8749), False, 'from goose3 import Goose\n'), ((8836, 8864), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (8862, 8864), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((9285, 9334), 'flask.jsonify', 'jsonify', (["{'result': senti, 'pos': sentiment_dict}"], {}), "({'result': senti, 'pos': sentiment_dict})\n", (9292, 9334), False, 'from flask import request, jsonify\n'), ((9654, 9700), 'flask.jsonify', 'jsonify', (["{'result': output[0]['summary_text']}"], {}), "({'result': output[0]['summary_text']})\n", (9661, 9700), False, 'from flask import request, jsonify\n'), ((9791, 9798), 'goose3.Goose', 'Goose', ([], {}), '()\n', (9796, 9798), False, 'from goose3 import Goose\n'), ((10024, 10071), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (10034, 10071), True, 'import matplotlib.pyplot as plt\n'), ((10076, 10091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10084, 10091), True, 'import matplotlib.pyplot as plt\n'), ((10096, 10117), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)', 'y': '(0)'}), '(x=0, y=0)\n', (10107, 10117), True, 'import matplotlib.pyplot as plt\n'), ((10355, 10405), 'flask.send_file', 'send_file', (['"""./wordcloud.png"""'], {'mimetype': '"""image/png"""'}), "('./wordcloud.png', mimetype='image/png')\n", (10364, 10405), False, 'from 
flask import send_file\n'), ((10503, 10510), 'goose3.Goose', 'Goose', ([], {}), '()\n', (10508, 10510), False, 'from goose3 import Goose\n'), ((10769, 10787), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (10781, 10787), True, 'import pandas as pd\n'), ((10792, 10851), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (10803, 10851), True, 'import seaborn as sns\n'), ((10857, 10910), 'matplotlib.pyplot.title', 'plt.title', (['"""Propagandastic Evaluation of the Article"""'], {}), "('Propagandastic Evaluation of the Article')\n", (10866, 10910), True, 'import matplotlib.pyplot as plt\n'), ((11085, 11114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""propaganda.png"""'], {}), "('propaganda.png')\n", (11096, 11114), True, 'import matplotlib.pyplot as plt\n'), ((11352, 11403), 'flask.send_file', 'send_file', (['"""./propaganda.png"""'], {'mimetype': '"""image/png"""'}), "('./propaganda.png', mimetype='image/png')\n", (11361, 11403), False, 'from flask import send_file\n'), ((12085, 12198), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['(documents1 + documents + documents2 + documents3 + documents4)'], {}), '(documents1 + documents +\n documents2 + documents3 + documents4)\n', (12131, 12198), False, 'import llama_index\n'), ((12380, 12411), 'flask.jsonify', 'jsonify', (["{'response': response}"], {}), "({'response': response})\n", (12387, 12411), False, 'from flask import request, jsonify\n'), ((12495, 12502), 'goose3.Goose', 'Goose', ([], {}), '()\n', (12500, 12502), False, 'from goose3 import Goose\n'), ((12584, 12612), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (12610, 12612), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((13057, 13105), 'flask.jsonify', 'jsonify', (["{'sentiment': category, 'verdict': res}"], {}), "({'sentiment': category, 'verdict': res})\n", (13064, 13105), False, 'from flask import request, jsonify\n'), ((14023, 14041), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (14035, 14041), True, 'import pandas as pd\n'), ((14051, 14110), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Target"""', 'y': '"""Value"""', 'data': 'df', 'palette': '"""Set2"""'}), "(x='Target', y='Value', data=df, palette='Set2')\n", (14062, 14110), True, 'import seaborn as sns\n'), ((14116, 14176), 'matplotlib.pyplot.title', 'plt.title', (['"""Hate Speech Params Detection present in Article"""'], {}), "('Hate Speech Params Detection present in Article')\n", (14125, 14176), True, 'import matplotlib.pyplot as plt\n'), ((14351, 14374), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hate.png"""'], {}), "('hate.png')\n", (14362, 14374), True, 'import matplotlib.pyplot as plt\n'), ((14612, 14657), 'flask.send_file', 'send_file', (['"""./hate.png"""'], {'mimetype': '"""image/png"""'}), "('./hate.png', mimetype='image/png')\n", (14621, 14657), False, 'from flask import send_file\n'), ((14805, 14833), 'pandas.read_csv', 'pd.read_csv', (['"""blacklist.csv"""'], {}), "('blacklist.csv')\n", (14816, 14833), True, 'import pandas as pd\n'), ((3175, 3203), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {}), "('SERPAPI_API_KEY')\n", (3184, 3203), False, 'import os\n'), ((5770, 5798), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (5779, 5798), False, 
'import os\n'), ((9432, 9439), 'goose3.Goose', 'Goose', ([], {}), '()\n', (9437, 9439), False, 'from goose3 import Goose\n'), ((11741, 11766), 'os.getenv', 'os.getenv', (['"""BEARER_TOKEN"""'], {}), "('BEARER_TOKEN')\n", (11750, 11766), False, 'import os\n'), ((2142, 2180), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (2173, 2180), True, 'import snscrape.modules.twitter as snstwitter\n'), ((5541, 5580), 'snscrape.modules.twitter.TwitterProfileScraper', 'snstwitter.TwitterProfileScraper', (['query'], {}), '(query)\n', (5573, 5580), True, 'import snscrape.modules.twitter as snstwitter\n'), ((6654, 6692), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (6685, 6692), True, 'import snscrape.modules.twitter as snstwitter\n'), ((6926, 6954), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (6952, 6954), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((9881, 9942), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(1280)', 'height': '(853)', 'margin': '(0)', 'colormap': '"""Blues"""'}), "(width=1280, height=853, margin=0, colormap='Blues')\n", (9890, 9942), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((15185, 15221), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['url'], {}), '(url)\n', (15216, 15221), True, 'import snscrape.modules.twitter as snstwitter\n')] |
import sqlite3
import pandas as pd
import llama_index
import os
import openai
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
text
)
from llama_index import SQLDatabase, ServiceContext
from llama_index.llms import OpenAI
from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine
# Create a new SQLite database (or connect to an existing one)
def create_and_load_db():
# Connect to the SQLite database (or create a new one)
conn = sqlite3.connect('company_info.db')
# Read the CSV file into a Pandas DataFrame
df = pd.read_csv('C:\\Users\\Nsahni\\Downloads\\Github\\Bynd\\company_information_db.csv')
# Write the data to a SQLite table
df.to_sql('company_table', conn, if_exists='replace', index=False)
return conn
def execute_query(conn, query):
# Query the table
query_result = pd.read_sql_query(query, conn)
print(query_result)
conn = create_and_load_db()
with open('config.txt', 'r') as f:
openai.api_key = f.read().strip()
llm = OpenAI(temperature=0, model="gpt-4")
engine = create_engine('sqlite:///company_info.db')
metadata_obj = MetaData()
metadata_obj.create_all(engine)
service_context = ServiceContext.from_defaults(llm=llm)
sql_database = SQLDatabase(engine, include_tables=['company_table'])
metadata_obj = MetaData()
# with engine.connect() as con:
# rows = con.execute(text("SELECT * FROM company_table where market_cap > 10000000"))
# for row in rows:
# print(row)
query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database,
    tables=["company_table"],
    service_context=service_context,
)
query_str = input("Please enter the query you are looking for: ")
response = query_engine.query(query_str)
# response_df = pd.DataFrame(response)
print(response)
# print(response)
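# The SQL generated for the natural-language question is typically available in the
# response metadata (a sketch; the exact key can vary across llama_index versions):
# print(response.metadata.get("sql_query"))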
# execute_query(conn, "SELECT * FROM company_table limit 10")
# Close the connection
conn.close()
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SQLDatabase",
"llama_index.indices.struct_store.sql_query.NLSQLTableQueryEngine"
] | [((1122, 1158), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (1128, 1158), False, 'from llama_index.llms import OpenAI\n'), ((1168, 1210), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///company_info.db"""'], {}), "('sqlite:///company_info.db')\n", (1181, 1210), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1226, 1236), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1234, 1236), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1287, 1324), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1315, 1324), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((1340, 1393), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {'include_tables': "['company_table']"}), "(engine, include_tables=['company_table'])\n", (1351, 1393), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((1409, 1419), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (1417, 1419), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, text\n'), ((1604, 1678), 'llama_index.indices.struct_store.sql_query.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'sql_database': 'sql_database', 'tables': "['company_table']"}), "(sql_database=sql_database, tables=['company_table'])\n", (1625, 1678), False, 'from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine\n'), ((578, 612), 'sqlite3.connect', 'sqlite3.connect', (['"""company_info.db"""'], {}), "('company_info.db')\n", (593, 612), False, 'import sqlite3\n'), ((671, 761), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Nsahni\\\\Downloads\\\\Github\\\\Bynd\\\\company_information_db.csv"""'], {}), "(\n 'C:\\\\Users\\\\Nsahni\\\\Downloads\\\\Github\\\\Bynd\\\\company_information_db.csv')\n", (682, 761), True, 'import pandas as pd\n'), ((958, 988), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'conn'], {}), '(query, conn)\n', (975, 988), True, 'import pandas as pd\n')] |
import os
from typing import Optional, Dict
import openai
import pandas as pd
from langchain.llms import OpenAI
import llama_index
from llama_index.readers.schema.base import Document
from llama_index import SimpleWebPageReader, QuestionAnswerPrompt
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index import LLMPredictor, OpenAIEmbedding
from llama_index.indices.vector_store.base import VectorStore
from mindsdb.integrations.libs.base import BaseMLEngine
from mindsdb.utilities.config import Config
def _validate_prompt_template(prompt_template: str):
if '{context_str}' not in prompt_template or '{query_str}' not in prompt_template:
raise Exception(
"Provided prompt template is invalid, missing `{context_str}`, `{query_str}`. Please ensure both placeholders are present and try again.") # noqa
class LlamaIndexHandler(BaseMLEngine):
""" Integration with the LlamaIndex data framework for LLM applications. """
name = 'llama_index'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generative = True
self.default_index_class = 'GPTVectorStoreIndex'
self.supported_index_class = ['GPTVectorStoreIndex']
self.default_reader = 'DFReader'
self.supported_reader = ['DFReader', 'SimpleWebPageReader']
@staticmethod
def create_validation(target, args=None, **kwargs):
if 'prompt_template' in args['using']:
_validate_prompt_template(args['using']['prompt_template'])
if args['using'].get('mode') == 'conversational':
for param in ('user_column', 'assistant_column'):
if param not in args['using']:
raise Exception(f'Conversational mode requires {param} parameter')
def create(self, target: str, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> None:
if 'using' not in args:
raise Exception("LlamaIndex engine requires a USING clause! Refer to its documentation for more details.")
if 'index_class' not in args['using']:
args['using']['index_class'] = self.default_index_class
elif args['using']['index_class'] not in self.supported_index_class:
raise Exception(f"Invalid index class argument. Please use one of {self.supported_index_class}")
if 'reader' not in args['using']:
args['using']['reader'] = self.default_reader
elif args['using']['reader'] not in self.supported_reader:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_reader}")
# workaround to create llama model without input data
if df is None or df.empty:
df = pd.DataFrame([{'text': ''}])
if args['using']['reader'] == 'DFReader':
dstrs = df.apply(lambda x: ', '.join([f'{col}: {str(entry)}' for col, entry in zip(df.columns, x)]), axis=1)
reader = list(map(lambda x: Document(x), dstrs.tolist()))
elif args['using']['reader'] == 'SimpleWebPageReader':
if 'source_url_link' not in args['using']:
raise Exception("SimpleWebPageReader requires a `source_url_link` parameter. Refer to LlamaIndex documentation for more details.") # noqa
reader = SimpleWebPageReader(html_to_text=True).load_data([args['using']['source_url_link']])
else:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_reader}.")
self.model_storage.json_set('args', args)
index = self._setup_index(reader)
path = self.model_storage.folder_get('context')
index.storage_context.persist(persist_dir=path)
self.model_storage.folder_sync('context')
def update(self, args) -> None:
prompt_template = args['using'].get('prompt_template', args.get('prompt_template', None))
if prompt_template is not None:
_validate_prompt_template(prompt_template)
args_cur = self.model_storage.json_get('args')
args_cur['using'].update(args['using'])
# check new set of arguments
self.create_validation(None, args_cur)
self.model_storage.json_set('args', args_cur)
def predict(self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> pd.DataFrame:
pred_args = args['predict_params'] if args else {}
args = self.model_storage.json_get('args')
engine_kwargs = {}
if args['using'].get('mode') == 'conversational':
user_column = args['using']['user_column']
assistant_column = args['using']['assistant_column']
messages = []
for row in df[:-1].to_dict('records'):
messages.append(f'user: {row[user_column]}')
messages.append(f'assistant: {row[assistant_column]}')
conversation = '\n'.join(messages)
questions = [
df.iloc[-1][user_column]
]
if 'prompt' in pred_args and pred_args['prompt'] is not None:
user_prompt = pred_args['prompt']
else:
user_prompt = args['using'].get('prompt', '')
prompt_template = f'{user_prompt}\n'\
f'---------------------\n' \
f'We have provided context information below. \n' \
f'{{context_str}}\n' \
f'---------------------\n' \
f'This is previous conversation history:\n' \
f'{conversation}\n' \
f'---------------------\n' \
f'Given this information, please answer the question: {{query_str}}'
engine_kwargs['text_qa_template'] = QuestionAnswerPrompt(prompt_template)
else:
input_column = args['using'].get('input_column', None)
prompt_template = args['using'].get('prompt_template', args.get('prompt_template', None))
if prompt_template is not None:
_validate_prompt_template(prompt_template)
engine_kwargs['text_qa_template'] = QuestionAnswerPrompt(prompt_template)
if input_column is None:
raise Exception(f'`input_column` must be provided at model creation time or through USING clause when predicting. Please try again.') # noqa
if input_column not in df.columns:
raise Exception(f'Column "{input_column}" not found in input data! Please try again.')
questions = df[input_column]
index_path = self.model_storage.folder_get('context')
storage_context = StorageContext.from_defaults(persist_dir=index_path)
service_context = self._get_service_context()
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine(**engine_kwargs)
results = []
for question in questions:
query_results = query_engine.query(question) # TODO: provide extra_info in explain_target col
results.append(query_results.response)
result_df = pd.DataFrame({'question': questions, args['target']: results}) # result_df['answer'].tolist()
return result_df
def _get_service_context(self):
args = self.model_storage.json_get('args')
openai_api_key = self._get_llama_index_api_key(args['using'])
openai.api_key = openai_api_key # TODO: shouldn't have to do this! bug?
llm_kwargs = {
'openai_api_key': openai_api_key
}
if 'temperature' in args['using']:
llm_kwargs['temperature'] = args['using']['temperature']
if 'model_name' in args['using']:
llm_kwargs['model_name'] = args['using']['model_name']
if 'max_tokens' in args['using']:
llm_kwargs['max_tokens'] = args['using']['max_tokens']
llm = OpenAI(**llm_kwargs) # TODO: all usual params should go here
embed_model = OpenAIEmbedding(openai_api_key=openai_api_key)
service_context = ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=llm),
embed_model=embed_model
)
return service_context
def _setup_index(self, documents):
args = self.model_storage.json_get('args')
indexer: VectorStore = getattr(llama_index, args['using']['index_class'])
index = indexer.from_documents(documents, service_context=self._get_service_context())
return index
def _get_llama_index_api_key(self, args, strict=True):
"""
API_KEY preference order:
1. provided at model creation
2. provided at engine creation
3. OPENAI_API_KEY env variable
4. llama_index.OPENAI_API_KEY setting in config.json
Note: method is not case sensitive.
"""
key = 'OPENAI_API_KEY'
for k in key, key.lower():
# 1
if args.get(k):
return args[k]
# 2
connection_args = self.engine_storage.get_connection_args()
if k in connection_args:
return connection_args[k]
# 3
api_key = os.getenv(k)
if api_key is not None:
return api_key
# 4
config = Config()
openai_cfg = config.get('llama_index', {})
if k in openai_cfg:
return openai_cfg[k]
if strict:
raise Exception(f'Missing API key "{k}". Either re-create this ML_ENGINE specifying the `{k}` parameter, or re-create this model and pass the API key with `USING` syntax.') # noqa
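# Example (a sketch, not part of this handler): once the engine is registered in
# MindsDB, a model backed by it could be created with SQL roughly like:
#   CREATE MODEL my_web_qa
#   PREDICT answer
#   USING engine = 'llama_index',
#         reader = 'SimpleWebPageReader',
#         source_url_link = 'https://example.com/page',
#         input_column = 'question',
#         openai_api_key = '...';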
| [
"llama_index.SimpleWebPageReader",
"llama_index.LLMPredictor",
"llama_index.OpenAIEmbedding",
"llama_index.StorageContext.from_defaults",
"llama_index.QuestionAnswerPrompt",
"llama_index.load_index_from_storage",
"llama_index.readers.schema.base.Document"
] | [((6636, 6688), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (6664, 6688), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((6759, 6832), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (6782, 6832), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((7132, 7194), 'pandas.DataFrame', 'pd.DataFrame', (["{'question': questions, args['target']: results}"], {}), "({'question': questions, args['target']: results})\n", (7144, 7194), True, 'import pandas as pd\n'), ((7914, 7934), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '(**llm_kwargs)\n', (7920, 7934), False, 'from langchain.llms import OpenAI\n'), ((7998, 8044), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'openai_api_key': 'openai_api_key'}), '(openai_api_key=openai_api_key)\n', (8013, 8044), False, 'from llama_index import LLMPredictor, OpenAIEmbedding\n'), ((2754, 2782), 'pandas.DataFrame', 'pd.DataFrame', (["[{'text': ''}]"], {}), "([{'text': ''}])\n", (2766, 2782), True, 'import pandas as pd\n'), ((5742, 5779), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_template'], {}), '(prompt_template)\n', (5762, 5779), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n'), ((9223, 9235), 'os.getenv', 'os.getenv', (['k'], {}), '(k)\n', (9232, 9235), False, 'import os\n'), ((9340, 9348), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (9346, 9348), False, 'from mindsdb.utilities.config import Config\n'), ((6120, 6157), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['prompt_template'], {}), '(prompt_template)\n', (6140, 6157), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n'), ((8127, 8148), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8139, 8148), False, 'from llama_index import LLMPredictor, OpenAIEmbedding\n'), ((2995, 3006), 'llama_index.readers.schema.base.Document', 'Document', (['x'], {}), '(x)\n', (3003, 3006), False, 'from llama_index.readers.schema.base import Document\n'), ((3321, 3359), 'llama_index.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (3340, 3359), False, 'from llama_index import SimpleWebPageReader, QuestionAnswerPrompt\n')] |
"""
This script is used to summarize conversations from Zendesk support tickets.
It reads text files containing comments from the ticket and generates a summary
that includes information about the participants, problems raised, key events,
current status of the ticket, and log lines from the messages.
The script uses the `Gemini` model from the `llama_index` package to generate the summary.
The summary is saved in a text file for each ticket.
Usage:
- Modify the `MODEL` variable to specify the desired model for summarization.
- Run the script to generate summaries for the tickets.
Note: This script requires the `llama_index` package to be installed.
"""
import os
import glob
import time
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.gemini import Gemini
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.evaluation import FaithfulnessEvaluator
MODEL = "Gemini"
DATA_DIR = "data"
SUMMARY_ROOT = "structured.summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Gemini()
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
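# TreeSummarize condenses the ticket comments against each question prompt by
# recursively summarizing chunks, so long conversations still fit the model context.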
# evaluator = FaithfulnessEvaluator(llm=llm)
COMPANY = "PaperCut"
BASE_PROMPT = f"The following text is a series of messages from a {COMPANY} support ticket."
def makePrompt(text):
return f"{BASE_PROMPT}\n{text}"
QUESTION_DETAIL = [
("Summary", "Summarise the whole conversation in one sentence."),
("Problems", """List the problems raised in the ticket.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Problems are issues that need to be resolved, such as a bug, a feature request.
Questions about how to use the product are not problems.
Responses to problems are not problems.
Each problem should be a single sentence describing the problem.
When there is no problem, don't write a line.
If there are multiple problems, order them by importance, most important first."""),
("Status", """What is the current status of the ticket?
Is it open, closed, or pending?
If it is closed, what was the resolution?
If it is pending, what is the next action?
If it is open, what is the current problem?
Do not include any other information in this answer.
Your answer should be one sentence for status and optionally one sentence for the resolution or next action.
"""),
    ("Participants", f"""List the participants and who they work for.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Use the format: 'Name: Company.'
List the customer first and {COMPANY} staff last.
"""),
("Events", """List the key events and the date they occurred.
An event is something that happens, such as a problem being reported, a solution being proposed, or a resolution being reached.
Don't include contacts, responses, or other non-events.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Questions about how to use the product are not events.
Responses to problems are not events.
Log lines are not events.
When there is no event, don't write a line.
Use the format: 'Date: Event.'
Format the date as 'YYYY-MM-DD'.
Order the list by date, earliest first."""),
("Logs", """List all the log lines from the messages.
Use a numbered list.
Order the list by date, earliest first.
Don't add a prologue or epilogue to the list.
When there is no log line, don't write a line.
Write the full log line.
Log lines are lines that start with a date and a status such as INFO, WARN, DEBUG or ERROR.
Example: 2022-01-27 13:31:43,628 WARN
Example: 2022-01-26 12:40:18,380 DEBUG ClientManagerImpl
Example: ERROR | wrapper | 2022/01/27 13:30:58 | JVM exited unexpectedly. """),
]
QUESTIONS = [question for question, _ in QUESTION_DETAIL]
QUESTION_PROMPT = {short: makePrompt(detail) for (short, detail) in QUESTION_DETAIL}
def makeAnswer(question, answer):
question = f"{question.upper()}:"
return f"{question:13} -------------------------------------------------------------*\n{answer}"
def summariseTicket(ticketNumber):
"""Summarizes the ticket `ticketNumber` by generating answers to a set of predefined questions.
Returns: Structured text containing the answers to each of the questions based on the
comments in the ticket.
"""
t0 = time.time()
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
print(f"Loaded {len(texts)} comments in {time.time() - t0:.2f} seconds")
questionAnswer = {}
for question in reversed(QUESTIONS):
t0 = time.time()
prompt = QUESTION_PROMPT[question]
answer = summarizer.get_response(prompt, texts)
questionAnswer[question] = answer.strip()
print(f"{time.time() - t0:5.2f} seconds to answer {question}")
return "\n\n".join(makeAnswer(question, questionAnswer[question]) for question in QUESTIONS)
#
# Test case.
#
# ['1259693', '1216136', '1196141', '1260221', '1116722', '1280919']
# 0: 1259693 7 comments 2.888 kb
# 1: 1216136 26 comments 20.715 kb
# 2: 1196141 122 comments 81.527 kb
# 3: 1260221 106 comments 126.619 kb
# 4: 1116722 288 comments 190.168 kb
# 5: 1280919 216 comments 731.220 kb
MAX_SIZE = 100 # Maximum size of ticket comments in kilobytes.
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
# ticketNumbers = ticketNumbers[:2]
ticketNumbers = [k for k in ticketNumbers if totalSizeKB(commentPaths(k)) < MAX_SIZE]
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f" skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
if os.path.exists(summaryPath(ticketNumber)):
continue
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.gemini.Gemini",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.ServiceContext.from_defaults"
] | [((1113, 1152), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1124, 1152), False, 'import os\n'), ((2083, 2091), 'llama_index.llms.gemini.Gemini', 'Gemini', ([], {}), '()\n', (2089, 2091), False, 'from llama_index.llms.gemini import Gemini\n'), ((2110, 2168), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (2138, 2168), False, 'from llama_index.core import ServiceContext\n'), ((2182, 2243), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (2195, 2243), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((1434, 1470), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (1446, 1470), False, 'import os\n'), ((1670, 1718), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1682, 1718), False, 'import os\n'), ((2016, 2030), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2028, 2030), False, 'from datetime import datetime\n'), ((5369, 5380), 'time.time', 'time.time', ([], {}), '()\n', (5378, 5380), False, 'import time\n'), ((5439, 5485), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (5460, 5485), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((7081, 7092), 'time.time', 'time.time', ([], {}), '()\n', (7090, 7092), False, 'import time\n'), ((1060, 1093), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (1072, 1093), False, 'import os\n'), ((5710, 5721), 'time.time', 'time.time', ([], {}), '()\n', (5719, 5721), False, 'import time\n'), ((7704, 7715), 'time.time', 'time.time', ([], {}), '()\n', (7713, 7715), False, 'import time\n'), ((8357, 8368), 'time.time', 'time.time', ([], {}), '()\n', (8366, 8368), False, 'import time\n'), ((1499, 1531), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (1511, 1531), False, 'import os\n'), ((6537, 6559), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6553, 6559), False, 'import os\n'), ((7783, 7794), 'time.time', 'time.time', ([], {}), '()\n', (7792, 7794), False, 'import time\n'), ((1836, 1857), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1851, 1857), False, 'import os\n'), ((5600, 5611), 'time.time', 'time.time', ([], {}), '()\n', (5609, 5611), False, 'import time\n'), ((6582, 6609), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (6594, 6609), False, 'import os\n'), ((5888, 5899), 'time.time', 'time.time', ([], {}), '()\n', (5897, 5899), False, 'import time\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages always carry no score, which
            is the only way to mark them as attributed passages. The list then
            continues with the originally provided passages, which keep their
            scores from retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.core.response.schema.Response",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
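A minimal usage sketch for the synthesizer above, assuming the `GoogleTextSynthesizer` class defined in this file is in scope, that `google-generativeai` is installed, and that Google AQA credentials are configured in the environment; the question and passages are illustrative only.
# Build the synthesizer with the type-safe factory defined above.
synthesizer = GoogleTextSynthesizer.from_defaults(temperature=0.7, answer_style=1)
# Ask a question grounded in a handful of caller-supplied passages.
result = synthesizer.get_response(
    query_str="What does attributed question answering return?",
    text_chunks=[
        "Attributed QA grounds each answer in the passages supplied with the query.",
        "The service also reports how likely it is that the question is answerable.",
    ],
)
print(result.answer)                  # grounded answer text
print(result.attributed_passages)     # passages the model relied on
print(result.answerable_probability)  # model's estimate that the answer is grounded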
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
        if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query, including the query embedding.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict",
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
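A minimal usage sketch for the store above, assuming an Elasticsearch node is reachable at http://localhost:9200 and using toy 3-dimensional embeddings in place of real model output; `ElasticsearchStore`, `TextNode` and `VectorStoreQuery` are the names already imported or defined in this file, and the index name is illustrative.
# The index is created lazily on the first add(), with dims inferred from the
# first node's embedding.
store = ElasticsearchStore(index_name="demo-index", es_url="http://localhost:9200")
nodes = [
    TextNode(text="hello world", embedding=[0.1, 0.2, 0.3]),
    TextNode(text="goodbye world", embedding=[0.3, 0.2, 0.1]),
]
store.add(nodes)
# k-NN query against the same toy vector space.
result = store.query(
    VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1)
)
print([n.get_content() for n in result.nodes], result.similarities)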
# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
import os
import requests
import chainlit as cl
from dotenv import load_dotenv
import llama_index
from llama_index.core import set_global_handler
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
from llama_index.core import (
SimpleDirectoryReader,
load_index_from_storage,
VectorStoreIndex,
StorageContext,
)
from llama_index.vector_stores.faiss import FaissVectorStore
import faiss
# ChatOpenAI Templates
system_template = """You are a helpful assistant who always speaks in a pleasant tone!
"""
user_template = """{input}
Think through your response step by step.
"""
# query_engine = index.as_query_engine()
# response = query_engine.query("Who is the E-VP, Operations - and how old are they?")
# print(response.response)
#
# response = query_engine.query("What is the gross carrying amount of Total Amortizable Intangible Assets for Jan 29, 2023?")
# print(response.response)
# if storage folder exists and is not empty, load the index from it else from documents
@cl.on_chat_start
async def start_chat():
load_dotenv()
set_global_handler("wandb", run_args={"project": "aie1-llama-index-middleterm"})
wandb_callback = llama_index.core.global_handler
Settings.llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
index = None
if os.path.exists("./storage") and os.listdir("./storage"):
vector_store = FaissVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
vector_store=vector_store, persist_dir="./storage"
)
index = load_index_from_storage(storage_context=storage_context)
else:
with requests.get('https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf',
stream=True) as r:
r.raise_for_status() # Raises a HTTPError if the response status code is 4XX/5XX
os.makedirs(os.path.dirname('nvidia_data/paper.pdf'), exist_ok=True)
with open('nvidia_data/paper.pdf', 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
documents = SimpleDirectoryReader('nvidia_data/').load_data()
faiss_index = faiss.IndexFlatL2(1536)
storage_context = StorageContext.from_defaults(vector_store=FaissVectorStore(faiss_index=faiss_index))
index = VectorStoreIndex.from_documents(
documents,
storage_context=storage_context,
persist_dir="./storage"
)
cl.user_session.set("wandb_callback", wandb_callback)
cl.user_session.set("query_engine", index.as_query_engine())
@cl.on_message
async def main(message: cl.Message):
Settings.callback_manager = cl.user_session.get("wandb_callback")
query_engine = cl.user_session.get("query_engine")
template = (f"You are a helpful assistant who always speaks in a pleasant tone! responds to user input with a step by step guide using this context: {message.content} input: {input}")
response = query_engine.query(template)
response_message = cl.Message(content="")
for token in response.response:
await response_message.stream_token(token=token)
await response_message.send()
@cl.on_stop
def on_stop():
print("The user wants to stop the task!")
cl.user_session.get("wandb_callback").finish()
@cl.on_chat_end
def on_chat_end():
print("The user disconnected!")
cl.user_session.get("wandb_callback").finish() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.openai.OpenAI",
"llama_index.vector_stores.faiss.FaissVectorStore",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.set_global_handler",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding",
"llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir"
] | [((1242, 1255), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1253, 1255), False, 'from dotenv import load_dotenv\n'), ((1261, 1346), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args': "{'project': 'aie1-llama-index-middleterm'}"}), "('wandb', run_args={'project': 'aie1-llama-index-middleterm'}\n )\n", (1279, 1346), False, 'from llama_index.core import set_global_handler\n'), ((1415, 1461), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0.1, model='gpt-3.5-turbo')\n", (1421, 1461), False, 'from llama_index.llms.openai import OpenAI\n'), ((1489, 1536), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (1504, 1536), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2795, 2848), 'chainlit.user_session.set', 'cl.user_session.set', (['"""wandb_callback"""', 'wandb_callback'], {}), "('wandb_callback', wandb_callback)\n", (2814, 2848), True, 'import chainlit as cl\n'), ((3000, 3037), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3019, 3037), True, 'import chainlit as cl\n'), ((3057, 3092), 'chainlit.user_session.get', 'cl.user_session.get', (['"""query_engine"""'], {}), "('query_engine')\n", (3076, 3092), True, 'import chainlit as cl\n'), ((3349, 3371), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (3359, 3371), True, 'import chainlit as cl\n'), ((1562, 1589), 'os.path.exists', 'os.path.exists', (['"""./storage"""'], {}), "('./storage')\n", (1576, 1589), False, 'import os\n'), ((1594, 1617), 'os.listdir', 'os.listdir', (['"""./storage"""'], {}), "('./storage')\n", (1604, 1617), False, 'import os\n'), ((1642, 1688), 'llama_index.vector_stores.faiss.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', (['"""./storage"""'], {}), "('./storage')\n", (1675, 1688), False, 'from llama_index.vector_stores.faiss import FaissVectorStore\n'), ((1715, 1800), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': '"""./storage"""'}), "(vector_store=vector_store, persist_dir='./storage'\n )\n", (1743, 1800), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((1834, 1890), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (1857, 1890), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((2492, 2515), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['(1536)'], {}), '(1536)\n', (2509, 2515), False, 'import faiss\n'), ((2643, 2747), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'persist_dir': '"""./storage"""'}), "(documents, storage_context=storage_context,\n persist_dir='./storage')\n", (2674, 2747), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((1914, 2046), 'requests.get', 'requests.get', (['"""https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf"""'], {'stream': '(True)'}), "(\n 
'https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf'\n , stream=True)\n", (1926, 2046), False, 'import requests\n'), ((3579, 3616), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3598, 3616), True, 'import chainlit as cl\n'), ((3703, 3740), 'chainlit.user_session.get', 'cl.user_session.get', (['"""wandb_callback"""'], {}), "('wandb_callback')\n", (3722, 3740), True, 'import chainlit as cl\n'), ((2187, 2227), 'os.path.dirname', 'os.path.dirname', (['"""nvidia_data/paper.pdf"""'], {}), "('nvidia_data/paper.pdf')\n", (2202, 2227), False, 'import os\n'), ((2420, 2457), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""nvidia_data/"""'], {}), "('nvidia_data/')\n", (2441, 2457), False, 'from llama_index.core import SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext\n'), ((2584, 2625), 'llama_index.vector_stores.faiss.FaissVectorStore', 'FaissVectorStore', ([], {'faiss_index': 'faiss_index'}), '(faiss_index=faiss_index)\n', (2600, 2625), False, 'from llama_index.vector_stores.faiss import FaissVectorStore\n')] |
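One gap worth noting in the script above: it reloads a persisted index from ./storage when that folder exists, but it never writes one, and passing `persist_dir` to `from_documents` is not the documented way to persist an index. The sketch below shows the assumed missing step, using only calls already present in the file plus `storage_context.persist`; treat it as inferred intent, not part of the original app.
# After VectorStoreIndex.from_documents(...) succeeds, persist the FAISS-backed
# index so the os.path.exists("./storage") branch can restore it on the next start.
index.storage_context.persist(persist_dir="./storage")
# Subsequent runs then take the reload path shown above:
vector_store = FaissVectorStore.from_persist_dir("./storage")
storage_context = StorageContext.from_defaults(
    vector_store=vector_store, persist_dir="./storage"
)
index = load_index_from_storage(storage_context=storage_context)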
"""
This script is used to summarize conversations from Zendesk support tickets.
It reads text files containing comments from the ticket and generates a summary
that includes information about the participants, problems raised, key events,
current status of the ticket, and log lines from the messages.
The script uses the `llama2` model, served locally through Ollama via the `llama_index` package, to generate the summary.
The summary is saved in a text file for each ticket.
Usage:
- Modify the `MODEL` variable to specify the desired model for summarization.
- Run the script to generate summaries for the tickets.
Note: This script requires the `llama_index` package to be installed.
"""
import os
import glob
import time
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.ollama import Ollama
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
# MODEL = "mistral"
MODEL = "llama2"
# MODEL = "llama2:text" # Doesn't follow instructions.
# MODEL = "mistral:instruct"
# MODEL = "llama2:13b" # Crushes my Mac
DATA_DIR = "data"
SUMMARY_ROOT = "structured.summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
TIMEOUT_SEC = 600
print(f"Loading {MODEL}")
llm = Ollama(model=MODEL, request_timeout=TIMEOUT_SEC)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
# evaluator = FaithfulnessEvaluator(llm=llm)
COMPANY = "PaperCut"
BASE_PROMPT = f"The following text is a series of messages from a {COMPANY} support ticket."
def makePrompt(text):
return f"{BASE_PROMPT}\n{text}"
QUESTION_DETAIL = [
("Summary", "Summarise the whole conversation in one sentence."),
("Problems", """List the problems raised in the ticket.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Problems are issues that need to be resolved, such as a bug, a feature request.
Questions about how to use the product are not problems.
Responses to problems are not problems.
Each problem should be a single sentence describing the problem.
When there is no problem, don't write a line.
If there are multiple problems, order them by importance, most important first."""),
("Status", """What is the current status of the ticket?
Is it open, closed, or pending?
If it is closed, what was the resolution?
If it is pending, what is the next action?
If it is open, what is the current problem?
Do not include any other information in this answer.
Your answer should be one sentence for status and optionally one sentence for the resolution or next action.
"""),
("Participants", """List the participants and who they work for.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Use the format: 'Name: Company.'
List the customer first and {COMPANY} staff last.
"""),
("Events", """List the key events and the date they occurred.
An event is something that happens, such as a problem being reported, a solution being proposed, or a resolution being reached.
Don't include contacts, responses, or other non-events.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Questions about how to use the product are not events.
Responses to problems are not events.
Log lines are not events.
When there is no event, don't write a line.
Use the format: 'Date: Event.'
Format the date as 'YYYY-MM-DD'.
Order the list by date, earliest first."""),
("Logs", """List all the log lines from the messages.
Use a numbered list.
Order the list by date, earliest first.
Don't add a prologue or epilogue to the list.
When there is no log line, don't write a line.
Write the full log line.
Log lines are lines that start with a date and a status such as INFO, WARN, DEBUG or ERROR.
Example: 2022-01-27 13:31:43,628 WARN
Example: 2022-01-26 12:40:18,380 DEBUG ClientManagerImpl
Example: ERROR | wrapper | 2022/01/27 13:30:58 | JVM exited unexpectedly. """),
]
QUESTIONS = [question for question, _ in QUESTION_DETAIL]
QUESTION_PROMPT = {short: makePrompt(detail) for (short, detail) in QUESTION_DETAIL}
def makeAnswer(question, answer):
question = f"{question.upper()}:"
return f"{question:13} -------------------------------------------------------------*\n{answer}"
def summariseTicket(ticketNumber):
"""Summarizes the ticket `ticketNumber` by generating answers to a set of predefined questions.
Returns: Structured text containing the answers to each of the questions based on the
comments in the ticket.
"""
t0 = time.time()
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
print(f"Loaded {len(texts)} comments in {time.time() - t0:.2f} seconds")
questionAnswer = {}
for question in reversed(QUESTIONS):
t0 = time.time()
prompt = QUESTION_PROMPT[question]
answer = summarizer.get_response(prompt, texts)
questionAnswer[question] = answer.strip()
print(f"{time.time() - t0:5.2f} seconds to answer {question}")
return "\n\n".join(makeAnswer(question, questionAnswer[question]) for question in QUESTIONS)
#
# Test case.
#
# ['1259693', '1216136', '1196141', '1260221', '1116722', '1280919']
# 0: 1259693 7 comments 2.888 kb
# 1: 1216136 26 comments 20.715 kb
# 2: 1196141 122 comments 81.527 kb
# 3: 1260221 106 comments 126.619 kb
# 4: 1116722 288 comments 190.168 kb
# 5: 1280919 216 comments 731.220 kb
MAX_SIZE = 100 # Maximum size of ticket comments in kilobytes.
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
# ticketNumbers = ticketNumbers[:2]
ticketNumbers = [k for k in ticketNumbers if totalSizeKB(commentPaths(k)) < MAX_SIZE]
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f" skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
if os.path.exists(summaryPath(ticketNumber)):
continue
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.ServiceContext.from_defaults"
] | [((1201, 1240), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1212, 1240), False, 'import os\n'), ((2215, 2263), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': 'MODEL', 'request_timeout': 'TIMEOUT_SEC'}), '(model=MODEL, request_timeout=TIMEOUT_SEC)\n', (2221, 2263), False, 'from llama_index.llms.ollama import Ollama\n'), ((2282, 2340), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (2310, 2340), False, 'from llama_index.core import ServiceContext\n'), ((2354, 2415), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (2367, 2415), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((1522, 1558), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (1534, 1558), False, 'import os\n'), ((1758, 1806), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1770, 1806), False, 'import os\n'), ((2104, 2118), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2116, 2118), False, 'from datetime import datetime\n'), ((5541, 5552), 'time.time', 'time.time', ([], {}), '()\n', (5550, 5552), False, 'import time\n'), ((5611, 5657), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (5632, 5657), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((7253, 7264), 'time.time', 'time.time', ([], {}), '()\n', (7262, 7264), False, 'import time\n'), ((1148, 1181), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (1160, 1181), False, 'import os\n'), ((5882, 5893), 'time.time', 'time.time', ([], {}), '()\n', (5891, 5893), False, 'import time\n'), ((7876, 7887), 'time.time', 'time.time', ([], {}), '()\n', (7885, 7887), False, 'import time\n'), ((8529, 8540), 'time.time', 'time.time', ([], {}), '()\n', (8538, 8540), False, 'import time\n'), ((1587, 1619), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (1599, 1619), False, 'import os\n'), ((6709, 6731), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6725, 6731), False, 'import os\n'), ((7955, 7966), 'time.time', 'time.time', ([], {}), '()\n', (7964, 7966), False, 'import time\n'), ((1924, 1945), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1939, 1945), False, 'import os\n'), ((5772, 5783), 'time.time', 'time.time', ([], {}), '()\n', (5781, 5783), False, 'import time\n'), ((6754, 6781), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (6766, 6781), False, 'import os\n'), ((6060, 6071), 'time.time', 'time.time', ([], {}), '()\n', (6069, 6071), False, 'import time\n')] |
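A sketch of the on-disk layout the script above expects, plus a single-ticket run. It assumes the module-level names defined above (DATA_DIR, commentPaths, saveText, summaryPath, summariseTicket) are in scope and that an Ollama server with the llama2 model is running locally; the ticket number and comment text are illustrative only.
# Zendesk exports are expected as data/<ticketNumber>/<comment>.txt files.
os.makedirs(os.path.join(DATA_DIR, "1234567"), exist_ok=True)
with open(os.path.join(DATA_DIR, "1234567", "comment_001.txt"), "w") as f:
    f.write("2024-01-05 Customer reports the print queue hangs after upgrading.")
print(commentPaths("1234567"))  # -> ['data/1234567/comment_001.txt']
# Summarise one ticket and store the structured answers alongside the others.
saveText(summaryPath("1234567"), summariseTicket("1234567"))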
"""Response builder class.
This class provides general functions for taking in a set of text
and generating a response.
Will support different modes, from 1) stuffing chunks into prompt,
2) create and refine separately over each chunk, 3) tree summarization.
"""
import logging
from abc import abstractmethod
from typing import Any, Dict, Generator, List, Optional, Sequence, AsyncGenerator
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.base.response.schema import (
RESPONSE_TYPE,
PydanticResponse,
Response,
StreamingResponse,
AsyncStreamingResponse,
)
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.prompts.mixin import PromptMixin
from llama_index.core.schema import (
BaseNode,
MetadataMode,
NodeWithScore,
QueryBundle,
QueryType,
)
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import LLMPredictorType
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.types import RESPONSE_TEXT_TYPE
from llama_index.core.instrumentation.events.synthesis import (
SynthesizeStartEvent,
SynthesizeEndEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
logger = logging.getLogger(__name__)
QueryTextType = QueryType
def empty_response_generator() -> Generator[str, None, None]:
yield "Empty Response"
async def empty_response_agenerator() -> AsyncGenerator[str, None]:
yield "Empty Response"
class BaseSynthesizer(ChainableMixin, PromptMixin):
"""Response builder class."""
def __init__(
self,
llm: Optional[LLMPredictorType] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
streaming: bool = False,
output_cls: BaseModel = None,
# deprecated
service_context: Optional[ServiceContext] = None,
) -> None:
"""Init params."""
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
if callback_manager:
self._llm.callback_manager = callback_manager
self._callback_manager = (
callback_manager
or callback_manager_from_settings_or_context(Settings, service_context)
)
self._prompt_helper = (
prompt_helper
or Settings._prompt_helper
or PromptHelper.from_llm_metadata(
self._llm.metadata,
)
)
self._streaming = streaming
self._output_cls = output_cls
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
# TODO: keep this for now since response synthesizers don't generally have sub-modules
return {}
@property
def callback_manager(self) -> CallbackManager:
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self._callback_manager = callback_manager
# TODO: please fix this later
self._callback_manager = callback_manager
self._llm.callback_manager = callback_manager
@abstractmethod
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get response."""
...
@abstractmethod
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
"""Get response."""
...
def _log_prompt_and_response(
self,
formatted_prompt: str,
response: RESPONSE_TEXT_TYPE,
log_prefix: str = "",
) -> None:
"""Log prompt and response from LLM."""
logger.debug(f"> {log_prefix} prompt template: {formatted_prompt}")
logger.debug(f"> {log_prefix} response: {response}")
def _get_metadata_for_response(
self,
nodes: List[BaseNode],
) -> Optional[Dict[str, Any]]:
"""Get metadata for response."""
return {node.node_id: node.metadata for node in nodes}
def _prepare_response_output(
self,
response_str: Optional[RESPONSE_TEXT_TYPE],
source_nodes: List[NodeWithScore],
) -> RESPONSE_TYPE:
"""Prepare response object from response string."""
response_metadata = self._get_metadata_for_response(
[node_with_score.node for node_with_score in source_nodes]
)
if isinstance(response_str, str):
return Response(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, Generator):
return StreamingResponse(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, AsyncGenerator):
return AsyncStreamingResponse(
response_str,
source_nodes=source_nodes,
metadata=response_metadata,
)
if isinstance(response_str, self._output_cls):
return PydanticResponse(
response_str, source_nodes=source_nodes, metadata=response_metadata
)
raise ValueError(
f"Response must be a string or a generator. Found {type(response_str)}"
)
@dispatcher.span
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> RESPONSE_TYPE:
dispatcher.event(SynthesizeStartEvent(query=query))
if len(nodes) == 0:
if self._streaming:
empty_response = StreamingResponse(
response_gen=empty_response_generator()
)
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
else:
empty_response = Response("Empty Response")
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
response_str = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = additional_source_nodes or []
source_nodes = list(nodes) + list(additional_source_nodes)
response = self._prepare_response_output(response_str, source_nodes)
event.on_end(payload={EventPayload.RESPONSE: response})
dispatcher.event(SynthesizeEndEvent(query=query, response=str(response)))
return response
@dispatcher.span
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> RESPONSE_TYPE:
dispatcher.event(SynthesizeStartEvent(query=query))
if len(nodes) == 0:
if self._streaming:
empty_response = AsyncStreamingResponse(
response_gen=empty_response_agenerator()
)
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
else:
empty_response = Response("Empty Response")
dispatcher.event(
SynthesizeEndEvent(query=query, response=str(empty_response))
)
return empty_response
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
response_str = await self.aget_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = additional_source_nodes or []
source_nodes = list(nodes) + list(additional_source_nodes)
response = self._prepare_response_output(response_str, source_nodes)
event.on_end(payload={EventPayload.RESPONSE: response})
dispatcher.event(SynthesizeEndEvent(query=query, response=str(response)))
return response
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""As query component."""
return SynthesizerComponent(synthesizer=self)
class SynthesizerComponent(QueryComponent):
"""Synthesizer component."""
synthesizer: BaseSynthesizer = Field(..., description="Synthesizer")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.synthesizer.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure both query_str and nodes are there
if "query_str" not in input:
raise ValueError("Input must have key 'query_str'")
input["query_str"] = validate_and_convert_stringable(input["query_str"])
if "nodes" not in input:
raise ValueError("Input must have key 'nodes'")
nodes = input["nodes"]
if not isinstance(nodes, list):
raise ValueError("Input nodes must be a list")
for node in nodes:
if not isinstance(node, NodeWithScore):
raise ValueError("Input nodes must be a list of NodeWithScore")
return input
def _run_component(self, **kwargs: Any) -> Dict[str, Any]:
"""Run component."""
output = self.synthesizer.synthesize(kwargs["query_str"], kwargs["nodes"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Dict[str, Any]:
"""Run component."""
output = await self.synthesizer.asynthesize(
kwargs["query_str"], kwargs["nodes"]
)
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"query_str", "nodes"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
| [
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.base.response.schema.StreamingResponse",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.base.response.schema.Response",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent",
"llama_index.core.base.response.schema.AsyncStreamingResponse",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable",
"llama_index.core.schema.QueryBundle",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.base.response.schema.PydanticResponse"
] | [((1679, 1714), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1704, 1714), True, 'import llama_index.core.instrumentation as instrument\n'), ((1725, 1752), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1742, 1752), False, 'import logging\n'), ((9986, 10023), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Synthesizer"""'}), "(..., description='Synthesizer')\n", (9991, 10023), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((10590, 10641), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['query_str']"], {}), "(input['query_str'])\n", (10621, 10641), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((11597, 11640), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'query_str', 'nodes'}"], {}), "({'query_str', 'nodes'})\n", (11616, 11640), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((11739, 11771), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (11759, 11771), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((2470, 2525), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2498, 2525), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((2694, 2762), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2735, 2762), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((2886, 2936), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (2916, 2936), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((5135, 5212), 'llama_index.core.base.response.schema.Response', 'Response', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=response_metadata)\n', (5143, 5212), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5343, 5434), 'llama_index.core.base.response.schema.StreamingResponse', 'StreamingResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5360, 5434), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5565, 5661), 'llama_index.core.base.response.schema.AsyncStreamingResponse', 'AsyncStreamingResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 
'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5587, 5661), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((5794, 5884), 'llama_index.core.base.response.schema.PydanticResponse', 'PydanticResponse', (['response_str'], {'source_nodes': 'source_nodes', 'metadata': 'response_metadata'}), '(response_str, source_nodes=source_nodes, metadata=\n response_metadata)\n', (5810, 5884), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((6309, 6342), 'llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent', 'SynthesizeStartEvent', ([], {'query': 'query'}), '(query=query)\n', (6329, 6342), False, 'from llama_index.core.instrumentation.events.synthesis import SynthesizeStartEvent, SynthesizeEndEvent\n'), ((7013, 7041), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (7024, 7041), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeWithScore, QueryBundle, QueryType\n'), ((8148, 8181), 'llama_index.core.instrumentation.events.synthesis.SynthesizeStartEvent', 'SynthesizeStartEvent', ([], {'query': 'query'}), '(query=query)\n', (8168, 8181), False, 'from llama_index.core.instrumentation.events.synthesis import SynthesizeStartEvent, SynthesizeEndEvent\n'), ((8857, 8885), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (8868, 8885), False, 'from llama_index.core.schema import BaseNode, MetadataMode, NodeWithScore, QueryBundle, QueryType\n'), ((6758, 6784), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6766, 6784), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n'), ((8602, 8628), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (8610, 8628), False, 'from llama_index.core.base.response.schema import RESPONSE_TYPE, PydanticResponse, Response, StreamingResponse, AsyncStreamingResponse\n')] |
"""
This script is used to summarize conversations from Zendesk support tickets.
It reads text files containing comments from the ticket and generates a summary
that includes information about the participants, problems raised, key events,
current status of the ticket, and log lines from the messages.
The script uses the `Anthropic` model from the `llama_index` package to generate the summary.
The summary is saved in a text file for each ticket.
Usage:
- Modify the `MODEL` variable to specify the desired model for summarization.
- Run the script to generate summaries for the tickets.
Note: This script requires the `llama_index` package to be installed.
"""
import os
import glob
import time
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.anthropic import Anthropic
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.evaluation import FaithfulnessEvaluator
# MODEL = "claude-3-opus-20240229"
# MODEL = "claude-3-sonnet-20240229"
MODEL = "claude-3-haiku-20240307"
DATA_DIR = "data"
SUMMARY_ROOT = "structured.summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
llm = Anthropic(model=MODEL, max_tokens=1024)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
# evaluator = FaithfulnessEvaluator(llm=llm)
COMPANY = "PaperCut"
BASE_PROMPT = f"The following text is a series of messages from a {COMPANY} support ticket."
def makePrompt(text):
return f"{BASE_PROMPT}\n{text}"
QUESTION_DETAIL = [
("Summary", "Summarise the whole conversation in one sentence."),
("Problems", """List the problems raised in the ticket.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Problems are issues that need to be resolved, such as a bug, a feature request.
Questions about how to use the product are not problems.
Responses to problems are not problems.
Each problem should be a single sentence describing the problem.
When there is no problem, don't write a line.
If there are multiple problems, order them by importance, most important first."""),
("Status", """What is the current status of the ticket?
Is it open, closed, or pending?
If it is closed, what was the resolution?
If it is pending, what is the next action?
If it is open, what is the current problem?
Do not include any other information in this answer.
Your answer should be one sentence for status and optionally one sentence for the resolution or next action.
"""),
("Participants", """List the participants and who they work for.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Use the format: 'Name: Company.'
List the customer first and {COMPANY} staff last.
"""),
("Events", """List the key events and the date they occurred.
An event is something that happens, such as a problem being reported, a solution being proposed, or a resolution being reached.
Don't include contacts, responses, or other non-events.
Use a numbered list.
Don't add a prologue or epilogue to the list.
Questions about how to use the product are not events.
Responses to problems are not events.
Log lines are not events.
When there is no event, don't write a line.
Use the format: 'Date: Event.'
Format the date as 'YYYY-MM-DD'.
Order the list by date, earliest first."""),
("Logs", """List all the log lines from the messages.
Use a numbered list.
Order the list by date, earliest first.
Don't add a prologue or epilogue to the list.
When there is no log line, don't write a line.
Write the full log line.
Log lines are lines that start with a date and a status such as INFO, WARN, DEBUG or ERROR.
Example: 2022-01-27 13:31:43,628 WARN
Example: 2022-01-26 12:40:18,380 DEBUG ClientManagerImpl
Example: ERROR | wrapper | 2022/01/27 13:30:58 | JVM exited unexpectedly. """),
]
QUESTIONS = [question for question, _ in QUESTION_DETAIL]
QUESTION_PROMPT = {short: makePrompt(detail) for (short, detail) in QUESTION_DETAIL}
def makeAnswer(question, answer):
question = f"{question.upper()}:"
return f"{question:13} -------------------------------------------------------------*\n{answer}"
def summariseTicket(ticketNumber):
"""Summarizes the ticket `ticketNumber` by generating answers to a set of predefined questions.
Returns: Structured text containing the answers to each of the questions based on the
comments in the ticket.
"""
t0 = time.time()
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
print(f"Loaded {len(texts)} comments in {time.time() - t0:.2f} seconds")
questionAnswer = {}
for question in reversed(QUESTIONS):
t0 = time.time()
prompt = QUESTION_PROMPT[question]
answer = summarizer.get_response(prompt, texts)
questionAnswer[question] = answer.strip()
print(f"{time.time() - t0:5.2f} seconds to answer {question}")
return "\n\n".join(makeAnswer(question, questionAnswer[question]) for question in QUESTIONS)
#
# Test case.
#
# ['1259693', '1216136', '1196141', '1260221', '1116722', '1280919']
# 0: 1259693 7 comments 2.888 kb
# 1: 1216136 26 comments 20.715 kb
# 2: 1196141 122 comments 81.527 kb
# 3: 1260221 106 comments 126.619 kb
# 4: 1116722 288 comments 190.168 kb
# 5: 1280919 216 comments 731.220 kb
MAX_SIZE = 100 # Maximum size of ticket comments in kilobytes.
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
# ticketNumbers = ticketNumbers[:2]
ticketNumbers = [k for k in ticketNumbers if totalSizeKB(commentPaths(k)) < MAX_SIZE]
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f" skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
        if ticketNumber not in durations:
            continue  # Skipped above because its summary already existed.
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.llms.anthropic.Anthropic",
"llama_index.core.response_synthesizers.TreeSummarize"
] | [((1211, 1250), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (1222, 1250), False, 'import os\n'), ((2181, 2220), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (2190, 2220), False, 'from llama_index.llms.anthropic import Anthropic\n'), ((2239, 2297), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (2267, 2297), False, 'from llama_index.core import ServiceContext\n'), ((2311, 2372), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (2324, 2372), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((1532, 1568), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (1544, 1568), False, 'import os\n'), ((1768, 1816), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1780, 1816), False, 'import os\n'), ((2114, 2128), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2126, 2128), False, 'from datetime import datetime\n'), ((5498, 5509), 'time.time', 'time.time', ([], {}), '()\n', (5507, 5509), False, 'import time\n'), ((5568, 5614), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (5589, 5614), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((7210, 7221), 'time.time', 'time.time', ([], {}), '()\n', (7219, 7221), False, 'import time\n'), ((1158, 1191), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (1170, 1191), False, 'import os\n'), ((5839, 5850), 'time.time', 'time.time', ([], {}), '()\n', (5848, 5850), False, 'import time\n'), ((7833, 7844), 'time.time', 'time.time', ([], {}), '()\n', (7842, 7844), False, 'import time\n'), ((8486, 8497), 'time.time', 'time.time', ([], {}), '()\n', (8495, 8497), False, 'import time\n'), ((1597, 1629), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (1609, 1629), False, 'import os\n'), ((6666, 6688), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (6682, 6688), False, 'import os\n'), ((7912, 7923), 'time.time', 'time.time', ([], {}), '()\n', (7921, 7923), False, 'import time\n'), ((1934, 1955), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1949, 1955), False, 'import os\n'), ((5729, 5740), 'time.time', 'time.time', ([], {}), '()\n', (5738, 5740), False, 'import time\n'), ((6711, 6738), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (6723, 6738), False, 'import os\n'), ((6017, 6028), 'time.time', 'time.time', ([], {}), '()\n', (6026, 6028), False, 'import time\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.core.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional; normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
        Use this to pass Google Auth credentials, for example when using a service account.
        Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id",
include_metadata=True,
metadata_keys=['file_name', 'creation_date']
)
index = VectorStoreIndex.from_vector_store(
vector_store=google_vector_store
)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
include_metadata (bool): Indicates whether to include custom metadata in the query
results. Defaults to False.
metadata_keys (Optional[List[str]]): Specifies which metadata keys to include in the
query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
# Configuration options for handling metadata in query results
include_metadata: bool = False
metadata_keys: Optional[List[str]] = None
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(
cls,
*,
corpus_id: str,
include_metadata: bool = False,
metadata_keys: Optional[List[str]] = None,
) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id (str): ID of an existing corpus on Google's server.
include_metadata (bool, optional): Specifies whether to include custom metadata in the
query results. Defaults to False, meaning metadata will not be included.
metadata_keys (Optional[List[str]], optional): Specifies which metadata keys to include
in the query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(
corpus_id=corpus_id,
client=client,
include_metadata=include_metadata,
metadata_keys=metadata_keys,
)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.core.vector_stores.types.VectorStoreQuery`.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
# relevant score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
# Make sure the chunks are reversed sorted according to relevant
# scores even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
nodes = []
include_metadata = self.include_metadata
metadata_keys = self.metadata_keys
for chunk in relevant_chunks:
metadata = {}
if include_metadata:
for custom_metadata in chunk.chunk.custom_metadata:
# Use getattr to safely extract values
value = getattr(custom_metadata, "string_value", None)
if (
value is None
): # If string_value is not set, check for numeric_value
value = getattr(custom_metadata, "numeric_value", None)
# Add to the metadata dictionary only those keys that are present in metadata_keys
if value is not None and (
metadata_keys is None or custom_metadata.key in metadata_keys
):
metadata[custom_metadata.key] = value
text_node = TextNode(
text=chunk.chunk.data.string_value,
                id_=_extract_chunk_id(chunk.chunk.name),
metadata=metadata, # Adding metadata to the node
)
nodes.append(text_node)
return VectorStoreQueryResult(
nodes=nodes,
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
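# Example (sketch): two TextNodes whose SOURCE relationship points at node "doc-456"
# end up in one _NodeGroup keyed by "doc-456"; nodes without a source node are grouped
# under the _default_doc_id document.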
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
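# Example (sketch): MetadataFilters(filters=[ExactMatchFilter(key="author", value="Arthur")])
# is flattened to {"author": "Arthur"} before being sent to the Google retriever.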
| [
"llama_index.vector_stores.google.genai_extension.build_semantic_retriever",
"llama_index.vector_stores.google.genai_extension.get_corpus",
"llama_index.vector_stores.google.genai_extension.Config",
"llama_index.vector_stores.google.genai_extension.EntityName.from_str",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.schema.RelatedNodeInfo",
"llama_index.vector_stores.google.genai_extension.get_document",
"llama_index.vector_stores.google.genai_extension.delete_document",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.vector_stores.google.genai_extension.set_config",
"llama_index.vector_stores.google.genai_extension.create_corpus"
] | [((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((2886, 2911), 'llama_index.vector_stores.google.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (2903, 2911), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4343, 4361), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4348, 4361), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4600, 4613), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4611, 4613), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((6413, 6446), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (6444, 6446), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8216, 8249), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (8247, 8249), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8326, 8417), 'llama_index.vector_stores.google.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (8346, 8417), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8451, 8494), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (8477, 8494), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10566, 10613), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (10570, 10613), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12317, 12364), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (12321, 12364), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12373, 12464), 'llama_index.vector_stores.google.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (12395, 12464), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((13968, 14015), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13972, 14015), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((16832, 16871), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (16858, 16871), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((6458, 6511), 'llama_index.vector_stores.google.genai_extension.get_corpus', 'genaix.get_corpus', ([], 
{'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (6475, 6511), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10819, 10908), 'llama_index.vector_stores.google.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (10838, 10908), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((17496, 17536), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (17511, 17536), False, 'from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((8291, 8303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8301, 8303), False, 'import uuid\n')] |
from typing import Any
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index.core
llama_index.core.global_handler = create_global_handler(eval_mode, **eval_params)
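# Usage sketch: the "simple" mode needs no extra package; the other modes assume the
# matching llama-index-callbacks-* package is installed.
#   set_global_handler("simple")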
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
try:
from llama_index.callbacks.wandb import (
WandbCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"WandbCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-wandb`"
)
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
try:
from llama_index.callbacks.openinference import (
OpenInferenceCallbackHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"OpenInferenceCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-openinference`"
)
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
try:
from llama_index.callbacks.arize_phoenix import (
arize_phoenix_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArizePhoenixCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-arize-phoenix`"
)
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
try:
from llama_index.callbacks.honeyhive import (
honeyhive_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"HoneyHiveCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-honeyhive`"
)
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
try:
from llama_index.callbacks.promptlayer import (
PromptLayerHandler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"PromptLayerHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-promptlayer`"
)
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
try:
from llama_index.callbacks.deepeval import (
deepeval_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"DeepEvalCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-deepeval`"
)
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
try:
from llama_index.callbacks.argilla import (
argilla_callback_handler,
) # pants: no-infer-dep
except ImportError:
raise ImportError(
"ArgillaCallbackHandler is not installed. "
"Please install it using `pip install llama-index-callbacks-argilla`"
)
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.wandb.WandbCallbackHandler",
"llama_index.callbacks.deepeval.deepeval_callback_handler",
"llama_index.callbacks.argilla.argilla_callback_handler",
"llama_index.callbacks.honeyhive.honeyhive_callback_handler",
"llama_index.callbacks.openinference.OpenInferenceCallbackHandler",
"llama_index.callbacks.promptlayer.PromptLayerHandler",
"llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler",
"llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler"
] | [((941, 976), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (961, 976), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1424, 1467), 'llama_index.callbacks.openinference.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1452, 1467), False, 'from llama_index.callbacks.openinference import OpenInferenceCallbackHandler\n'), ((1916, 1961), 'llama_index.callbacks.arize_phoenix.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1946, 1961), False, 'from llama_index.callbacks.arize_phoenix import arize_phoenix_callback_handler\n'), ((2390, 2431), 'llama_index.callbacks.honeyhive.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (2416, 2431), False, 'from llama_index.callbacks.honeyhive import honeyhive_callback_handler\n'), ((2852, 2885), 'llama_index.callbacks.promptlayer.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (2870, 2885), False, 'from llama_index.callbacks.promptlayer import PromptLayerHandler\n'), ((3309, 3349), 'llama_index.callbacks.deepeval.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (3334, 3349), False, 'from llama_index.callbacks.deepeval import deepeval_callback_handler\n'), ((3400, 3431), 'llama_index.core.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (3416, 3431), False, 'from llama_index.core.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((3850, 3889), 'llama_index.callbacks.argilla.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (3874, 3889), False, 'from llama_index.callbacks.argilla import argilla_callback_handler\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
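    # Usage sketch (hypothetical inputs), given a synthesizer built via from_defaults():
    #   result = synthesizer.get_response("What color is the sky?", ["The sky is blue."])
    #   print(result.answer, result.answerable_probability)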
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages always have no score, which
            is how they are marked as attributed passages. The list then
            continues with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.indices.query.schema.QueryBundle",
"llama_index.schema.TextNode",
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.bridge.pydantic import PrivateAttr
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
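# Usage sketch (hypothetical local URL; exactly one of es_url / cloud_id must be given):
#   client = _get_elasticsearch_client(es_url="http://localhost:9200")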
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
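# Example (sketch): scores [2.0, 1.0] are shifted by their max and exponentiated,
# giving [exp(0.0), exp(-1.0)] ~= [1.0, 0.37]; note the result is not normalised to sum to 1.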
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
        if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): vector store query containing the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): vector store query containing the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
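        # Hybrid mode combines the knn and full-text queries above using
        # Elasticsearch reciprocal rank fusion (RRF).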
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
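            # In hybrid mode Elasticsearch returns ranks (via "_rank") rather than
            # scores; convert them into similarity-like values where a higher
            # value means a better match.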
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((599, 618), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (608, 618), False, 'from logging import getLogger\n'), ((2444, 2497), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2476, 2497), False, 'import elasticsearch\n'), ((3820, 3836), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3828, 3836), True, 'import numpy as np\n'), ((5375, 5388), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5386, 5388), False, 'from llama_index.bridge.pydantic import PrivateAttr\n'), ((5894, 5914), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5912, 5914), False, 'import nest_asyncio\n'), ((17510, 17550), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17514, 17550), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12996, 13071), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (13006, 13071), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10461, 10485), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10483, 10485), False, 'import asyncio\n'), ((12388, 12433), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12409, 12433), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13143, 13207), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13153, 13207), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((14081, 14105), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14103, 14105), False, 'import asyncio\n'), ((16288, 16312), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16310, 16312), False, 'import asyncio\n'), ((19410, 19441), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19431, 19441), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3872, 3894), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3878, 3894), True, 'import numpy as np\n'), ((12622, 12634), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12632, 12634), False, 'import uuid\n'), ((20084, 20227), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20092, 20227), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Base query engine."""
import logging
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Sequence
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.prompts.mixin import PromptDictType, PromptMixin
from llama_index.core.schema import NodeWithScore, QueryBundle, QueryType
from llama_index.core.instrumentation.events.query import (
QueryEndEvent,
QueryStartEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
logger = logging.getLogger(__name__)
class BaseQueryEngine(ChainableMixin, PromptMixin):
"""Base query engine."""
def __init__(
self,
callback_manager: Optional[CallbackManager],
) -> None:
self.callback_manager = callback_manager or CallbackManager([])
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
@dispatcher.span
def query(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
dispatcher.event(QueryStartEvent())
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(str_or_query_bundle)
query_result = self._query(str_or_query_bundle)
dispatcher.event(QueryEndEvent())
return query_result
@dispatcher.span
async def aquery(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
dispatcher.event(QueryStartEvent())
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(str_or_query_bundle)
query_result = await self._aquery(str_or_query_bundle)
dispatcher.event(QueryEndEvent())
return query_result
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
raise NotImplementedError(
"This query engine does not support retrieve, use query directly"
)
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
raise NotImplementedError(
"This query engine does not support synthesize, use query directly"
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
raise NotImplementedError(
"This query engine does not support asynthesize, use aquery directly"
)
@abstractmethod
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
@abstractmethod
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return a query component."""
return QueryEngineComponent(query_engine=self)
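# QueryEngineComponent adapts a BaseQueryEngine for use inside a query
# pipeline: the "input" key is validated to a string, passed to query() or
# aquery(), and the response is returned under the "output" key.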
class QueryEngineComponent(QueryComponent):
"""Query engine component."""
query_engine: BaseQueryEngine = Field(..., description="Query engine")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.query_engine.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure input is a string
input["input"] = validate_and_convert_stringable(input["input"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = self.query_engine.query(kwargs["input"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = await self.query_engine.aquery(kwargs["input"])
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"input"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
| [
"llama_index.core.instrumentation.events.query.QueryStartEvent",
"llama_index.core.instrumentation.events.query.QueryEndEvent",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable",
"llama_index.core.schema.QueryBundle"
] | [((785, 820), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (810, 820), True, 'import llama_index.core.instrumentation as instrument\n'), ((830, 857), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (847, 857), False, 'import logging\n'), ((3558, 3596), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Query engine"""'}), "(..., description='Query engine')\n", (3563, 3596), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((4042, 4089), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (4073, 4089), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((4568, 4598), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (4587, 4598), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((4697, 4729), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (4717, 4729), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((1094, 1113), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1109, 1113), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((1418, 1435), 'llama_index.core.instrumentation.events.query.QueryStartEvent', 'QueryStartEvent', ([], {}), '()\n', (1433, 1435), False, 'from llama_index.core.instrumentation.events.query import QueryEndEvent, QueryStartEvent\n'), ((1700, 1715), 'llama_index.core.instrumentation.events.query.QueryEndEvent', 'QueryEndEvent', ([], {}), '()\n', (1713, 1715), False, 'from llama_index.core.instrumentation.events.query import QueryEndEvent, QueryStartEvent\n'), ((1869, 1886), 'llama_index.core.instrumentation.events.query.QueryStartEvent', 'QueryStartEvent', ([], {}), '()\n', (1884, 1886), False, 'from llama_index.core.instrumentation.events.query import QueryEndEvent, QueryStartEvent\n'), ((2158, 2173), 'llama_index.core.instrumentation.events.query.QueryEndEvent', 'QueryEndEvent', ([], {}), '()\n', (2171, 2173), False, 'from llama_index.core.instrumentation.events.query import QueryEndEvent, QueryStartEvent\n'), ((1582, 1614), 'llama_index.core.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (1593, 1614), False, 'from llama_index.core.schema import NodeWithScore, QueryBundle, QueryType\n'), ((2033, 2065), 'llama_index.core.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (2044, 2065), False, 'from llama_index.core.schema import NodeWithScore, QueryBundle, QueryType\n')] |
import logging
import sys
import torch
import bentoml
import llama_index
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
import typing as t
from typing import Any, List
from InstructorEmbedding import INSTRUCTOR
from llama_index.bridge.pydantic import PrivateAttr
from llama_index.embeddings.base import BaseEmbedding
from llama_index.prompts import PromptTemplate
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Setup for Custom Embedding
class InstructorEmbeddings(BaseEmbedding):
_model: INSTRUCTOR = PrivateAttr()
_instruction: str = PrivateAttr()
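    # The INSTRUCTOR model embeds (instruction, text) pairs, so the instruction
    # above is prepended to every query and document encoded below.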
def __init__(
self,
instructor_model_name: str = "hkunlp/instructor-large",
instruction: str = "Represent a document for semantic search:",
**kwargs: Any,
) -> None:
self._model = INSTRUCTOR(instructor_model_name)
self._instruction = instruction
super().__init__(**kwargs)
@classmethod
def class_name(cls) -> str:
return "instructor"
async def _aget_query_embedding(self, query: str) -> List[float]:
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
return self._get_text_embedding(text)
def _get_query_embedding(self, query: str) -> List[float]:
embeddings = self._model.encode([[self._instruction, query]])
return embeddings[0]
def _get_text_embedding(self, text: str) -> List[float]:
embeddings = self._model.encode([[self._instruction, text]])
return embeddings[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
embeddings = self._model.encode(
[[self._instruction, text] for text in texts]
)
return embeddings
class LlamaIndex(bentoml.Runnable):
SUPPORTED_RESOURCES = ("nvidia.com/gpu",)
SUPPORTS_CPU_MULTI_THREADING = False
def __init__(self):
# load documents
documents = SimpleDirectoryReader("/docs/vessl-docs-dataset/").load_data()
# This will wrap the default prompts that are internal to LlamaIndex
query_wrapper_prompt = PromptTemplate(
"Given the context information and not prior knowledge, answer the query.\n\n"
"### Instruction:\n{query_str}\n\n### Response:"
)
context_window = 1024
max_length = 1024
num_output = 256
embed_batch_size = 2
chunk_size = 256
llm = HuggingFaceLLM(
context_window=context_window,
max_new_tokens=num_output,
generate_kwargs={"temperature": 0.25, "do_sample": False},
query_wrapper_prompt=query_wrapper_prompt,
tokenizer_name="/data/llama-2-7b-hf",
model_name="/data/llama-2-7b-hf",
device_map="auto",
tokenizer_kwargs={"max_length": max_length},
            # when using CUDA, this reduces memory usage
model_kwargs={"torch_dtype": torch.float16}
)
service_context = ServiceContext.from_defaults(llm=llm, context_window=context_window, num_output=num_output, embed_model=InstructorEmbeddings(embed_batch_size=embed_batch_size), chunk_size=chunk_size)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
self.query_engine = query_engine
@bentoml.Runnable.method(batchable=False)
def generate(self, input_text: str) -> bool:
# set Logging to DEBUG for more detailed outputs
result = self.query_engine.query(input_text)
print("Query: " + input_text)
print("Answer: ")
print(result)
return result
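# Wrap the runnable in a BentoML runner and expose it as a service with a
# text input and JSON output endpoint.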
llamaindex_runner = t.cast("RunnerImpl", bentoml.Runner(LlamaIndex, name="llamaindex"))
svc = bentoml.Service("llamaindex_service", runners=[llamaindex_runner])
@svc.api(input=bentoml.io.Text(), output=bentoml.io.JSON())
async def infer(text: str) -> str:
result = await llamaindex_runner.generate.async_run(text)
return result
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.prompts.PromptTemplate",
"llama_index.llms.HuggingFaceLLM",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((446, 504), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (465, 504), False, 'import logging\n'), ((3971, 4037), 'bentoml.Service', 'bentoml.Service', (['"""llamaindex_service"""'], {'runners': '[llamaindex_runner]'}), "('llamaindex_service', runners=[llamaindex_runner])\n", (3986, 4037), False, 'import bentoml\n'), ((536, 576), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (557, 576), False, 'import logging\n'), ((677, 690), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (688, 690), False, 'from llama_index.bridge.pydantic import PrivateAttr\n'), ((715, 728), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (726, 728), False, 'from llama_index.bridge.pydantic import PrivateAttr\n'), ((3562, 3602), 'bentoml.Runnable.method', 'bentoml.Runnable.method', ([], {'batchable': '(False)'}), '(batchable=False)\n', (3585, 3602), False, 'import bentoml\n'), ((3917, 3962), 'bentoml.Runner', 'bentoml.Runner', (['LlamaIndex'], {'name': '"""llamaindex"""'}), "(LlamaIndex, name='llamaindex')\n", (3931, 3962), False, 'import bentoml\n'), ((505, 524), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (522, 524), False, 'import logging\n'), ((958, 991), 'InstructorEmbedding.INSTRUCTOR', 'INSTRUCTOR', (['instructor_model_name'], {}), '(instructor_model_name)\n', (968, 991), False, 'from InstructorEmbedding import INSTRUCTOR\n'), ((2288, 2437), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['"""Given the context information and not prior knowledge, answer the query.\n\n### Instruction:\n{query_str}\n\n### Response:"""'], {}), '(\n """Given the context information and not prior knowledge, answer the query.\n\n### Instruction:\n{query_str}\n\n### Response:"""\n )\n', (2302, 2437), False, 'from llama_index.prompts import PromptTemplate\n'), ((2617, 2995), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': 'context_window', 'max_new_tokens': 'num_output', 'generate_kwargs': "{'temperature': 0.25, 'do_sample': False}", 'query_wrapper_prompt': 'query_wrapper_prompt', 'tokenizer_name': '"""/data/llama-2-7b-hf"""', 'model_name': '"""/data/llama-2-7b-hf"""', 'device_map': '"""auto"""', 'tokenizer_kwargs': "{'max_length': max_length}", 'model_kwargs': "{'torch_dtype': torch.float16}"}), "(context_window=context_window, max_new_tokens=num_output,\n generate_kwargs={'temperature': 0.25, 'do_sample': False},\n query_wrapper_prompt=query_wrapper_prompt, tokenizer_name=\n '/data/llama-2-7b-hf', model_name='/data/llama-2-7b-hf', device_map=\n 'auto', tokenizer_kwargs={'max_length': max_length}, model_kwargs={\n 'torch_dtype': torch.float16})\n", (2631, 2995), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((3384, 3459), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (3415, 3459), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((4053, 4070), 'bentoml.io.Text', 'bentoml.io.Text', ([], {}), '()\n', (4068, 4070), False, 'import bentoml\n'), ((4079, 4096), 'bentoml.io.JSON', 'bentoml.io.JSON', ([], {}), '()\n', (4094, 4096), False, 'import bentoml\n'), ((2116, 2166), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""/docs/vessl-docs-dataset/"""'], {}), 
"('/docs/vessl-docs-dataset/')\n", (2137, 2166), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
import llama_index
import chromadb
from importlib.metadata import version
print(f"LlamaIndex version: {version('llama_index')}")
print(f"Chroma version: {version('chromadb')}")
# Load API key from .env file
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
# Define embedding model and LLM
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core.settings import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
Settings.embed_model = OpenAIEmbedding()
# Load the index with some example data
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader(
input_files=["./data/paul_graham_essay.txt"],
).load_data()
# Chunk documents into nodes
from llama_index.core.node_parser import SentenceWindowNodeParser
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
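# Each node stores its surrounding sentences under the "window" metadata key;
# the MetadataReplacementPostProcessor further below swaps that window in at
# query time so the LLM sees more context than the single matched sentence.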
# Extract nodes from documents
nodes = node_parser.get_nodes_from_documents(documents)
# Build the index
client = chromadb.EphemeralClient()
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(
model_name="BAAI/bge-base-en-v1.5",
device="cuda",
)
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
index_name = "MyExternalContent"
# Construct vector store
vector_store = ChromaVectorStore(
chroma_collection=client.create_collection(name=index_name),
)
# Set up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# Setup the index
# build VectorStoreIndex that takes care of chunking documents
# and encoding chunks to embeddings for future retrieval
index = VectorStoreIndex(
nodes,
storage_context=storage_context,
embed_model=embed_model,
)
# Setup the query engine
from llama_index.core.postprocessor import MetadataReplacementPostProcessor
# The target key defaults to `window` to match the node_parser's default
postproc = MetadataReplacementPostProcessor(
target_metadata_key="window",
)
from llama_index.core.postprocessor import SentenceTransformerRerank
# Define reranker model
rerank = SentenceTransformerRerank(
    top_n=2,
    model="BAAI/bge-reranker-base",
    device="cuda",
)
query_engine = index.as_query_engine(
    similarity_top_k=6,
    vector_store_query_mode="hybrid",
    alpha=0.5,
    node_postprocessors=[postproc, rerank],
)
# Run a query against the sentence-window RAG implementation
response = query_engine.query(
"What happened at InterLeaf?",
)
print(response)
window = response.source_nodes[0].node.metadata["window"]
sentence = response.source_nodes[0].node.metadata["original_text"]
print(f"Window: {window}")
print("------------------")
print(f"Original Sentence: {sentence}") | [
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.llms.openai.OpenAI",
"llama_index.core.VectorStoreIndex",
"llama_index.core.postprocessor.MetadataReplacementPostProcessor",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.postprocessor.SentenceTransformerRerank",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((511, 557), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo', temperature=0.1)\n", (517, 557), False, 'from llama_index.llms.openai import OpenAI\n'), ((582, 599), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (597, 599), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((981, 1113), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (1019, 1113), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((1252, 1278), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (1276, 1278), False, 'import chromadb\n'), ((1367, 1438), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""', 'device': '"""cuda"""'}), "(model_name='BAAI/bge-base-en-v1.5', device='cuda')\n", (1387, 1438), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1812, 1867), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1840, 1867), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2020, 2106), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'embed_model': 'embed_model'}), '(nodes, storage_context=storage_context, embed_model=\n embed_model)\n', (2036, 2106), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((2316, 2378), 'llama_index.core.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2348, 2378), False, 'from llama_index.core.postprocessor import MetadataReplacementPostProcessor\n'), ((2497, 2583), 'llama_index.core.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': '(2)', 'model': '"""BAAI/bge-reranker-base"""', 'device': '"""cuda"""'}), "(top_n=2, model='BAAI/bge-reranker-base', device=\n 'cuda')\n", (2522, 2583), False, 'from llama_index.core.postprocessor import SentenceTransformerRerank\n'), ((289, 302), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (300, 302), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((712, 779), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['./data/paul_graham_essay.txt']"}), "(input_files=['./data/paul_graham_essay.txt'])\n", (733, 779), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((108, 130), 'importlib.metadata.version', 'version', (['"""llama_index"""'], {}), "('llama_index')\n", (115, 130), False, 'from importlib.metadata import version\n'), ((160, 179), 'importlib.metadata.version', 'version', (['"""chromadb"""'], {}), "('chromadb')\n", (167, 179), False, 'from importlib.metadata import version\n')] |
import os, openai
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# pip install google-search-results
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from Roku_cs_agent import formatter, roku_agent
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["https://www.jgmancilla.com"], # Adjust this to your needs
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
##############################################################################################################
import llama_index, os
from llama_index import ServiceContext, StorageContext
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index.indices.loading import load_index_from_storage
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
# Load the persisted representative index and build its query engine
representative_storage_context = StorageContext.from_defaults(persist_dir="index_representative")
personal_index = load_index_from_storage(representative_storage_context)
representative_query_engine = personal_index.as_query_engine()
##############################################################################################################
class Question(BaseModel):
question: str
@app.post('/representative')
def representative(input: Question):
response = representative_query_engine.query(input.question)
return response
@app.post('/query_cs')
def query(input: Question):
response = roku_agent.run(input.question)
return response
@app.post('/spotlight')
def query_spotlight(input: Question):
response = formatter.query_cs(input.question)
return response | [
"llama_index.ServiceContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.set_global_service_context",
"llama_index.StorageContext.from_defaults"
] | [((49, 62), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (60, 62), False, 'from dotenv import load_dotenv\n'), ((80, 112), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (94, 112), False, 'import llama_index, os\n'), ((380, 389), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (387, 389), False, 'from fastapi import FastAPI\n'), ((947, 1000), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (957, 1000), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1030, 1048), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1046, 1048), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1068, 1133), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'llm_embeddings'}), '(llm=llm, embed_model=llm_embeddings)\n', (1096, 1133), False, 'from llama_index import ServiceContext, StorageContext\n'), ((1141, 1196), 'llama_index.set_global_service_context', 'llama_index.set_global_service_context', (['service_context'], {}), '(service_context)\n', (1179, 1196), False, 'import llama_index, os\n'), ((1263, 1327), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""index_representative"""'}), "(persist_dir='index_representative')\n", (1291, 1327), False, 'from llama_index import ServiceContext, StorageContext\n'), ((1345, 1400), 'llama_index.indices.loading.load_index_from_storage', 'load_index_from_storage', (['representative_storage_context'], {}), '(representative_storage_context)\n', (1368, 1400), False, 'from llama_index.indices.loading import load_index_from_storage\n'), ((1841, 1871), 'Roku_cs_agent.roku_agent.run', 'roku_agent.run', (['input.question'], {}), '(input.question)\n', (1855, 1871), False, 'from Roku_cs_agent import formatter, roku_agent\n'), ((1974, 2008), 'Roku_cs_agent.formatter.query_cs', 'formatter.query_cs', (['input.question'], {}), '(input.question)\n', (1992, 2008), False, 'from Roku_cs_agent import formatter, roku_agent\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""
Astra DB Vector store index.
An index based on a DB table with vector search capabilities,
powered by the astrapy library
"""
import json
import logging
from typing import Any, Dict, List, Optional, cast
from warnings import warn
import llama_index.core
from llama_index.core.bridge.pydantic import PrivateAttr
from astrapy.db import AstraDB
from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
DEFAULT_MMR_PREFETCH_FACTOR = 4.0
MAX_INSERT_BATCH_SIZE = 20
NON_INDEXED_FIELDS = ["metadata._node_content", "content"]
class AstraDBVectorStore(BasePydanticVectorStore):
"""
Astra DB Vector Store.
An abstraction of a Astra table with
vector-similarity-search. Documents, and their embeddings, are stored
in an Astra table and a vector-capable index is used for searches.
The table does not need to exist beforehand: if necessary it will
be created behind the scenes.
All Astra operations are done through the astrapy library.
Args:
        collection_name (str): collection name to use. If it does not exist, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. Defaults to 'default_keyspace'.
ttl_seconds (Optional[int]): expiration time for inserted entries.
Default is no expiration.
"""
stores_text: bool = True
flat_metadata: bool = True
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_astra_db: Any = PrivateAttr()
_astra_db_collection: Any = PrivateAttr()
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> None:
super().__init__()
# Set all the required class parameters
self._embedding_dimension = embedding_dimension
self._ttl_seconds = ttl_seconds
_logger.debug("Creating the Astra DB table")
# Build the Astra DB object
self._astra_db = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
from astrapy.api import APIRequestError
try:
# Create and connect to the newly created collection
self._astra_db_collection = self._astra_db.create_collection(
collection_name=collection_name,
dimension=embedding_dimension,
options={"indexing": {"deny": NON_INDEXED_FIELDS}},
)
except APIRequestError:
# possibly the collection is preexisting and has legacy
# indexing settings: verify
get_coll_response = self._astra_db.get_collections(
options={"explain": True}
)
collections = (get_coll_response["status"] or {}).get("collections") or []
preexisting = [
collection
for collection in collections
if collection["name"] == collection_name
]
if preexisting:
pre_collection = preexisting[0]
# if it has no "indexing", it is a legacy collection;
                # otherwise it's unexpected: warn and proceed at the user's risk
pre_col_options = pre_collection.get("options") or {}
if "indexing" not in pre_col_options:
warn(
(
f"Collection '{collection_name}' is detected as "
"having indexing turned on for all fields "
"(either created manually or by older versions "
"of this plugin). This implies stricter "
"limitations on the amount of text"
" each entry can store. Consider reindexing anew on a"
" fresh collection to be able to store longer texts."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
options_json = json.dumps(pre_col_options["indexing"])
warn(
(
f"Collection '{collection_name}' has unexpected 'indexing'"
f" settings (options.indexing = {options_json})."
" This can result in odd behaviour when running "
" metadata filtering and/or unwarranted limitations"
" on storing long texts. Consider reindexing anew on a"
" fresh collection."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
# other exception
raise
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of node with embeddings
"""
# Initialize list of objects to track
nodes_list = []
# Process each node individually
for node in nodes:
# Get the metadata
metadata = node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
)
# One dictionary of node data per node
nodes_list.append(
{
"_id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
"$vector": node.get_embedding(),
}
)
# Log the number of rows being added
_logger.debug(f"Adding {len(nodes_list)} rows to table")
# Initialize an empty list to hold the batches
batched_list = []
# Iterate over the node_list in steps of MAX_INSERT_BATCH_SIZE
for i in range(0, len(nodes_list), MAX_INSERT_BATCH_SIZE):
# Append a slice of node_list to the batched_list
batched_list.append(nodes_list[i : i + MAX_INSERT_BATCH_SIZE])
# Perform the bulk insert
for i, batch in enumerate(batched_list):
_logger.debug(f"Processing batch #{i + 1} of size {len(batch)}")
# Go to astrapy to perform the bulk insert
self._astra_db_collection.insert_many(batch)
# Return the list of ids
return [str(n["_id"]) for n in nodes_list]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes with the given ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
_logger.debug("Deleting a document from the Astra table")
self._astra_db_collection.delete(id=ref_doc_id, **delete_kwargs)
@property
def client(self) -> Any:
"""Return the underlying Astra vector table object."""
return self._astra_db_collection
@staticmethod
def _query_filters_to_dict(query_filters: MetadataFilters) -> Dict[str, Any]:
# Allow only legacy ExactMatchFilter and MetadataFilter with FilterOperator.EQ
if not all(
(
isinstance(f, ExactMatchFilter)
or (isinstance(f, MetadataFilter) and f.operator == FilterOperator.EQ)
)
for f in query_filters.filters
):
raise NotImplementedError(
"Only filters with operator=FilterOperator.EQ are supported"
)
return {f"metadata.{f.key}": f.value for f in query_filters.filters}
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
# Get the currently available query modes
_available_query_modes = [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.MMR,
]
# Reject query if not available
if query.mode not in _available_query_modes:
raise NotImplementedError(f"Query mode {query.mode} not available.")
# Get the query embedding
query_embedding = cast(List[float], query.query_embedding)
# Process the metadata filters as needed
if query.filters is not None:
query_metadata = self._query_filters_to_dict(query.filters)
else:
query_metadata = {}
# Get the scores depending on the query mode
if query.mode == VectorStoreQueryMode.DEFAULT:
# Call the vector_find method of AstraPy
matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=query.similarity_top_k,
filter=query_metadata,
)
# Get the scores associated with each
top_k_scores = [match["$similarity"] for match in matches]
elif query.mode == VectorStoreQueryMode.MMR:
# Querying a larger number of vectors and then doing MMR on them.
if (
kwargs.get("mmr_prefetch_factor") is not None
and kwargs.get("mmr_prefetch_k") is not None
):
raise ValueError(
"'mmr_prefetch_factor' and 'mmr_prefetch_k' "
"cannot coexist in a call to query()"
)
else:
if kwargs.get("mmr_prefetch_k") is not None:
prefetch_k0 = int(kwargs["mmr_prefetch_k"])
else:
prefetch_k0 = int(
query.similarity_top_k
* kwargs.get("mmr_prefetch_factor", DEFAULT_MMR_PREFETCH_FACTOR)
)
# Get the most we can possibly need to fetch
prefetch_k = max(prefetch_k0, query.similarity_top_k)
# Call AstraPy to fetch them
prefetch_matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=prefetch_k,
filter=query_metadata,
)
# Get the MMR threshold
mmr_threshold = query.mmr_threshold or kwargs.get("mmr_threshold")
# If we have found documents, we can proceed
if prefetch_matches:
zipped_indices, zipped_embeddings = zip(
*enumerate(match["$vector"] for match in prefetch_matches)
)
pf_match_indices, pf_match_embeddings = list(zipped_indices), list(
zipped_embeddings
)
else:
pf_match_indices, pf_match_embeddings = [], []
# Call the Llama utility function to get the top k
mmr_similarities, mmr_indices = get_top_k_mmr_embeddings(
query_embedding,
pf_match_embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=pf_match_indices,
mmr_threshold=mmr_threshold,
)
# Finally, build the final results based on the mmr values
matches = [prefetch_matches[mmr_index] for mmr_index in mmr_indices]
top_k_scores = mmr_similarities
# We have three lists to return
top_k_nodes = []
top_k_ids = []
# Get every match
for match in matches:
# Check whether we have a llama-generated node content field
if "_node_content" not in match["metadata"]:
match["metadata"]["_node_content"] = json.dumps(match)
# Create a new node object from the node metadata
node = metadata_dict_to_node(match["metadata"], text=match["content"])
# Append to the respective lists
top_k_nodes.append(node)
top_k_ids.append(match["_id"])
# return our final result
return VectorStoreQueryResult(
nodes=top_k_nodes,
similarities=top_k_scores,
ids=top_k_ids,
)
| [
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.utils.node_to_metadata_dict",
"llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.vector_stores.types.VectorStoreQueryResult"
] | [((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, 2131), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2153, 2166), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2164, 2166), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2199, 2212), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2210, 2212), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((9525, 9565), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (9529, 9565), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((13259, 13347), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'top_k_nodes', 'similarities': 'top_k_scores', 'ids': 'top_k_ids'}), '(nodes=top_k_nodes, similarities=top_k_scores, ids=\n top_k_ids)\n', (13281, 13347), False, 'from llama_index.core.vector_stores.types import BasePydanticVectorStore, ExactMatchFilter, FilterOperator, MetadataFilter, MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6510, 6589), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (6531, 6589), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13019, 13082), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (["match['metadata']"], {'text': "match['content']"}), "(match['metadata'], text=match['content'])\n", (13040, 13082), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12133, 12305), 'llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings', 'get_top_k_mmr_embeddings', (['query_embedding', 'pf_match_embeddings'], {'similarity_top_k': 'query.similarity_top_k', 'embedding_ids': 'pf_match_indices', 'mmr_threshold': 'mmr_threshold'}), '(query_embedding, pf_match_embeddings,\n similarity_top_k=query.similarity_top_k, embedding_ids=pf_match_indices,\n mmr_threshold=mmr_threshold)\n', (12157, 12305), False, 'from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings\n'), ((12919, 12936), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12929, 12936), False, 'import json\n'), ((4284, 4638), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. Consider reindexing anew on a fresh collection to be able to store longer texts."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. 
Consider reindexing anew on a fresh collection to be able to store longer texts."\n , UserWarning, stacklevel=2)\n', (4288, 4638), False, 'from warnings import warn\n'), ((5177, 5216), 'json.dumps', 'json.dumps', (["pre_col_options['indexing']"], {}), "(pre_col_options['indexing'])\n", (5187, 5216), False, 'import json\n'), ((5237, 5553), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."\n , UserWarning, stacklevel=2)\n', (5241, 5553), False, 'from warnings import warn\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel # type: ignore
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.llms.mock import MockLLM
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.core.types import RESPONSE_TEXT_TYPE
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
llm=MockLLM(),
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages always have no score, which
            is the only way to identify them as attributed passages. The list
            then continues with the originally provided passages, which have
            a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.vector_stores.google.genai_extension.build_generative_service",
"llama_index.core.schema.TextNode",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.base.response.schema.Response",
"llama_index.core.indices.query.schema.QueryBundle"
] | [((1057, 1084), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1074, 1084), False, 'import logging\n'), ((2707, 2740), 'llama_index.vector_stores.google.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2738, 2740), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4779, 4828), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4783, 4828), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6722, 6748), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6730, 6748), False, 'from llama_index.core.base.response.schema import Response\n'), ((6805, 6833), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6816, 6833), False, 'from llama_index.core.indices.query.schema import QueryBundle\n'), ((2596, 2605), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {}), '()\n', (2603, 2605), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((8244, 8266), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8252, 8266), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode\n')] |
from langfuse import Langfuse
from llama_index.llms.openai import OpenAI
import llama_index.core
llama_index.core.set_global_handler("langfuse")
from llama_index.core.llms import ChatMessage
langfuse = Langfuse()
dataset = langfuse.get_dataset("term-extraction")
prompt = langfuse.get_prompt("extraction-prompt-1")
model = OpenAI(model="gpt-4-turbo-preview")
for item in dataset.items:
compiled_prompt = prompt.compile(input=item.input)
    generation = langfuse.generation(prompt=prompt, model=model.model)
messages = [
ChatMessage(role="system", content="You are an API that must always respond with a json without any formatting."),
ChatMessage(role="user", content=compiled_prompt),
]
chat_completion = model.chat(messages)
print(chat_completion)
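    # link this generation to the dataset item under the named experiment run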
item.link(generation, "gpt-4-with-api-instructions")
generation.end(output=chat_completion)
# item.link(generation, "first-run-extraction") | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage"
] | [((203, 213), 'langfuse.Langfuse', 'Langfuse', ([], {}), '()\n', (211, 213), False, 'from langfuse import Langfuse\n'), ((325, 360), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""'}), "(model='gpt-4-turbo-preview')\n", (331, 360), False, 'from llama_index.llms.openai import OpenAI\n'), ((530, 653), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': '"""You are an API that must always respond with a json without any formatting."""'}), "(role='system', content=\n 'You are an API that must always respond with a json without any formatting.'\n )\n", (541, 653), False, 'from llama_index.core.llms import ChatMessage\n'), ((651, 700), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'compiled_prompt'}), "(role='user', content=compiled_prompt)\n", (662, 700), False, 'from llama_index.core.llms import ChatMessage\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.legacy.schema.Document",
"llama_index.legacy.indices.managed.google.generativeai.set_google_config"
] | [((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (759, 798), False, 'from unittest.mock import MagicMock, patch\n'), ((1012, 1071), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1030, 1071), False, 'import pytest\n'), ((1073, 1144), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1078, 1144), False, 'from unittest.mock import MagicMock, patch\n'), ((1402, 1461), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1420, 1461), False, 'import pytest\n'), ((1463, 1537), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1468, 1537), False, 'from unittest.mock import MagicMock, patch\n'), ((2137, 2196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2155, 2196), False, 'import pytest\n'), ((2198, 2272), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2203, 2272), False, 'from unittest.mock import MagicMock, patch\n'), ((2274, 2350), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2279, 2350), False, 'from unittest.mock import MagicMock, patch\n'), ((2352, 2437), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2357, 2437), False, 'from unittest.mock import MagicMock, patch\n'), ((2434, 2507), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2439, 2507), False, 'from unittest.mock import MagicMock, patch\n'), ((4398, 4457), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4416, 4457), False, 'import pytest\n'), ((4459, 4532), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4464, 4532), False, 'from unittest.mock import MagicMock, patch\n'), ((4534, 4611), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4539, 4611), False, 'from unittest.mock import MagicMock, patch\n'), ((4613, 4684), 'unittest.mock.patch', 'patch', 
(['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4618, 4684), False, 'from unittest.mock import MagicMock, patch\n'), ((570, 671), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (587, 671), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((868, 920), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (885, 920), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((934, 953), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (951, 953), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1252, 1284), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1264, 1284), True, 'import google.ai.generativelanguage as genai\n'), ((1308, 1348), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1331, 1348), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1805, 1862), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1830, 1862), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2913, 2940), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2936, 2940), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3037, 3085), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3051, 3085), True, 'import google.ai.generativelanguage as genai\n'), ((4874, 4906), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4886, 4906), True, 'import google.ai.generativelanguage as genai\n'), ((6624, 6664), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6647, 6664), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3531, 3565), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3539, 3565), False, 'from llama_index.legacy.schema import Document\n'), ((3579, 3612), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3587, 3612), False, 'from llama_index.legacy.schema import Document\n'), ((3208, 3264), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': 
'"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3219, 3264), True, 'import google.ai.generativelanguage as genai\n'), ((3369, 3425), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3380, 3425), True, 'import google.ai.generativelanguage as genai\n'), ((5155, 5194), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5170, 5194), True, 'import google.ai.generativelanguage as genai\n'), ((5431, 5452), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5441, 5452), True, 'import google.ai.generativelanguage as genai\n'), ((5775, 5889), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5819, 5889), True, 'import google.ai.generativelanguage as genai\n'), ((6237, 6351), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6281, 6351), True, 'import google.ai.generativelanguage as genai\n'), ((5611, 5651), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5621, 5651), True, 'import google.ai.generativelanguage as genai\n'), ((6103, 6134), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6113, 6134), True, 'import google.ai.generativelanguage as genai\n')] |
from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
| [
"llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.response_synthesizers.google.generativeai.set_google_config",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config"
] | [((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (729, 768), False, 'from unittest.mock import MagicMock, patch\n'), ((982, 1041), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1000, 1041), False, 'import pytest\n'), ((1043, 1120), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (1048, 1120), False, 'from unittest.mock import MagicMock, patch\n'), ((3580, 3639), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (3598, 3639), False, 'import pytest\n'), ((3641, 3718), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (3646, 3718), False, 'from unittest.mock import MagicMock, patch\n'), ((6499, 6558), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (6517, 6558), False, 'import pytest\n'), ((6560, 6637), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (6565, 6637), False, 'from unittest.mock import MagicMock, patch\n'), ((7434, 7493), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7452, 7493), False, 'import pytest\n'), ((7495, 7572), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (7500, 7572), False, 'from unittest.mock import MagicMock, patch\n'), ((8355, 8414), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8373, 8414), False, 'import pytest\n'), ((8416, 8493), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (8421, 8493), False, 'from unittest.mock import MagicMock, patch\n'), ((9288, 9347), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (9306, 9347), False, 'import pytest\n'), ((9349, 9426), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (9354, 9426), False, 'from unittest.mock import MagicMock, patch\n'), ((540, 641), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 
'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (557, 641), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((838, 890), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (855, 890), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((904, 923), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (921, 923), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5137, 5174), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (5172, 5174), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7037, 7074), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (7072, 7074), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7965, 8002), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8000, 8002), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((8894, 8931), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8929, 8931), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((9819, 9856), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (9854, 9856), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((2786, 2804), 'pytest.approx', 'pytest.approx', (['(0.7)'], {}), '(0.7)\n', (2799, 2804), False, 'import pytest\n'), ((6163, 6181), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (6176, 6181), False, 'import pytest\n'), ((6348, 6366), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (6361, 6366), False, 'import pytest\n'), ((6477, 6495), 'pytest.approx', 'pytest.approx', (['(0.9)'], {}), '(0.9)\n', (6490, 6495), False, 'import pytest\n'), ((7084, 7108), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7097, 7108), False, 'import pytest\n'), ((8012, 8036), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8025, 8036), False, 'import pytest\n'), ((8941, 8965), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8954, 8965), False, 'import pytest\n'), ((9866, 9890), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9879, 9890), False, 'import pytest\n'), ((2253, 2413), 'google.ai.generativelanguage.SafetySetting', 'genai.SafetySetting', ([], {'category': 'genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 
'genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=genai.HarmCategory.\n HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai.SafetySetting.\n HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (2272, 2413), True, 'import google.ai.generativelanguage as genai\n'), ((6860, 6883), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (6873, 6883), True, 'import google.ai.generativelanguage as genai\n'), ((7792, 7815), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (7805, 7815), True, 'import google.ai.generativelanguage as genai\n'), ((8717, 8740), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (8730, 8740), True, 'import google.ai.generativelanguage as genai\n'), ((9647, 9670), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (9660, 9670), True, 'import google.ai.generativelanguage as genai\n'), ((5324, 5348), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (5332, 5348), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((5485, 5517), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""Additional node"""'}), "(text='Additional node')\n", (5493, 5517), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((7273, 7297), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (7281, 7297), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((8201, 8225), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (8209, 8225), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((9130, 9154), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (9138, 9154), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((10055, 10079), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (10063, 10079), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((1342, 1363), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (1352, 1363), True, 'import google.ai.generativelanguage as genai\n'), ((3938, 3959), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (3948, 3959), True, 'import google.ai.generativelanguage as genai\n'), ((1687, 1801), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/789"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/789', part_index=0)\n", (1731, 1801), True, 'import google.ai.generativelanguage as genai\n'), ((4282, 4396), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (4326, 4396), True, 'import google.ai.generativelanguage as genai\n'), ((4744, 4858), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': 
'"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (4788, 4858), True, 'import google.ai.generativelanguage as genai\n'), ((1522, 1563), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42."""'}), "(text='Meaning of life is 42.')\n", (1532, 1563), True, 'import google.ai.generativelanguage as genai\n'), ((4118, 4158), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (4128, 4158), True, 'import google.ai.generativelanguage as genai\n'), ((4610, 4641), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (4620, 4641), True, 'import google.ai.generativelanguage as genai\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler"
] | [((1068, 1103), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1088, 1103), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1161, 1204), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1189, 1204), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1262, 1307), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1292, 1307), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1361, 1402), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1387, 1402), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1458, 1491), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1476, 1491), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1544, 1584), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1569, 1584), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1635, 1666), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1651, 1666), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.argilla_callback import argilla_callback_handler
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.argilla_callback.argilla_callback_handler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler"
] | [((1144, 1179), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1164, 1179), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1237, 1280), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1265, 1280), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1338, 1383), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1368, 1383), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1437, 1478), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1534, 1567), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1552, 1567), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1620, 1660), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1645, 1660), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1711, 1742), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1727, 1742), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1794, 1833), 'llama_index.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1818, 1833), False, 'from llama_index.callbacks.argilla_callback import argilla_callback_handler\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
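    # shift by the max score and exponentiate so the returned similarities fall in (0, 1]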
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
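    Examples:
        A minimal sketch (the URL and index name below are illustrative
        placeholders, assuming a reachable Elasticsearch node):
            store = ElasticsearchStore(
                index_name="llama_index_docs",
                es_url="http://localhost:9200",
            )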
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
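        # build one bulk "index" action per node; the node id (or a random UUID) becomes the Elasticsearch _id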
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query (VectorStoreQuery): query object containing the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
query (VectorStoreQuery): query object containing the query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3771), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3763, 3771), True, 'import numpy as np\n'), ((5436, 5456), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5454, 5456), False, 'import nest_asyncio\n'), ((16834, 16874), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16838, 16874), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12320, 12395), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12330, 12395), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9785, 9809), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9807, 9809), False, 'import asyncio\n'), ((11712, 11757), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11733, 11757), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12467, 12531), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12477, 12531), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13405, 13429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13427, 13429), False, 'import asyncio\n'), ((15612, 15636), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15634, 15636), False, 'import asyncio\n'), ((18734, 18765), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18755, 18765), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3807, 3829), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3813, 3829), True, 'import numpy as np\n'), ((11946, 11958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11956, 11958), False, 'import uuid\n'), ((19408, 19551), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19416, 19551), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
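For orientation, the sketch below shows how the ElasticsearchStore defined in the record above is typically wired into llama_index. It is a minimal, hypothetical example: the Elasticsearch URL, index name, and data directory are assumptions, not values taken from the record.

# Minimal usage sketch (assumed setup, not part of the original record).
from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores import ElasticsearchStore

# Hypothetical index name; assumes a locally running Elasticsearch node.
vector_store = ElasticsearchStore(
    index_name="demo_docs",
    es_url="http://localhost:9200",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
documents = SimpleDirectoryReader("./data").load_data()  # hypothetical data folder
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
# Queries run dense (k-NN) retrieval against the default "embedding" field.
print(index.as_query_engine().query("What do these documents cover?"))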
"""Elasticsearch vector store."""
"""Elasticsearch vector store."""
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler",
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
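The record above is the handler factory behind llama_index's global observability hook. Below is a minimal sketch of using it; it assumes the top-level set_global_handler re-export that these llama_index versions provide, and the "simple" mode needs no external service or credentials.

import llama_index

# Registers SimpleLLMHandler as llama_index.global_handler; the other modes
# ("wandb", "openinference", "arize_phoenix", "honeyhive", "promptlayer")
# expect their packages/credentials to be passed through **eval_params.
llama_index.set_global_handler("simple")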
"""Global eval handlers."""
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded in the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.core.response.schema.Response",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
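The synthesizer above is easiest to follow from a short usage sketch. The snippet below is illustrative only: it assumes the `google-generativeai` package is installed, that AQA credentials are configured, and that `GoogleTextSynthesizer` is available as defined above; the query and passages are made-up examples.
# Hedged usage sketch for GoogleTextSynthesizer (defined above).
# Assumes google-generativeai is installed and credentials are configured;
# the query string and passages are illustrative placeholders.
synthesizer = GoogleTextSynthesizer.from_defaults(
    temperature=0.7,
    answer_style=1,  # AnswerStyle.ABSTRACTIVE
)
result = synthesizer.get_response(
    query_str="Who designed the Analytical Engine?",
    text_chunks=[
        "Charles Babbage designed the Analytical Engine.",
        "Ada Lovelace described programs for it in her notes.",
    ],
)
print(result.answer)                  # grounded answer text
print(result.attributed_passages)     # passages the model cited
print(result.answerable_probability)  # model's estimate that the answer is grounded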
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
Response's `metadata` may also have have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.core.response.schema.Response",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
query_embedding (VectorStoreQuery): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict",
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
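As with the synthesizer above, a short sketch clarifies how `ElasticsearchStore` is wired together. Everything below is an assumption for illustration: the index name, the local Elasticsearch URL, and the toy 3-dimensional embeddings stand in for a real deployment and a real embedding model.
# Hedged usage sketch for ElasticsearchStore (defined above).
# Assumes an Elasticsearch node is reachable at http://localhost:9200; the
# index name and 3-dimensional embeddings are placeholders for real values.
from llama_index.legacy.schema import TextNode
from llama_index.legacy.vector_stores.types import VectorStoreQuery

store = ElasticsearchStore(
    index_name="demo-index",
    es_url="http://localhost:9200",
)
nodes = [
    TextNode(text="hello world", id_="node-1", embedding=[0.1, 0.2, 0.3]),
    TextNode(text="goodbye world", id_="node-2", embedding=[0.3, 0.2, 0.1]),
]
store.add(nodes)  # creates the index on first use, then bulk-indexes the nodes
result = store.query(
    VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1)
)
print(result.ids, result.similarities)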
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
query_embedding (VectorStoreQuery): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict",
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.core.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
Parameters are optional, Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
Refer to for auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id",
include_metadata=True,
metadata_keys=['file_name', 'creation_date']
)
index = VectorStoreIndex.from_vector_store(
vector_store=google_vector_store
)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
include_metadata (bool): Indicates whether to include custom metadata in the query
results. Defaults to False.
metadata_keys (Optional[List[str]]): Specifies which metadata keys to include in the
query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
# This is not the Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
# Configuration options for handling metadata in query results
include_metadata: bool = False
metadata_keys: Optional[List[str]] = None
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(
cls,
*,
corpus_id: str,
include_metadata: bool = False,
metadata_keys: Optional[List[str]] = None,
) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id (str): ID of an existing corpus on Google's server.
include_metadata (bool, optional): Specifies whether to include custom metadata in the
query results. Defaults to False, meaning metadata will not be included.
metadata_keys (Optional[List[str]], optional): Specifies which metadata keys to include
in the query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(
corpus_id=corpus_id,
client=client,
include_metadata=include_metadata,
metadata_keys=metadata_keys,
)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.core.vector_stores.types.VectorStoreQuery`.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
            # relevance score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
        # Make sure the chunks are sorted in descending order of relevance
        # score, even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
nodes = []
include_metadata = self.include_metadata
metadata_keys = self.metadata_keys
for chunk in relevant_chunks:
metadata = {}
if include_metadata:
for custom_metadata in chunk.chunk.custom_metadata:
# Use getattr to safely extract values
value = getattr(custom_metadata, "string_value", None)
if (
value is None
): # If string_value is not set, check for numeric_value
value = getattr(custom_metadata, "numeric_value", None)
# Add to the metadata dictionary only those keys that are present in metadata_keys
if value is not None and (
metadata_keys is None or custom_metadata.key in metadata_keys
):
metadata[custom_metadata.key] = value
text_node = TextNode(
text=chunk.chunk.data.string_value,
id=_extract_chunk_id(chunk.chunk.name),
metadata=metadata, # Adding metadata to the node
)
nodes.append(text_node)
return VectorStoreQueryResult(
nodes=nodes,
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
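# Illustrative conversion (filter key/value invented for the example):
#   _convert_filter(MetadataFilters(filters=[ExactMatchFilter(key="author", value="X")]))
# returns {"author": "X"}, the dictionary form that genaix.query_corpus and
# genaix.query_document accept as their `filter` argument.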
| [
"llama_index.vector_stores.google.genai_extension.build_semantic_retriever",
"llama_index.vector_stores.google.genai_extension.get_corpus",
"llama_index.vector_stores.google.genai_extension.Config",
"llama_index.vector_stores.google.genai_extension.EntityName.from_str",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.schema.RelatedNodeInfo",
"llama_index.vector_stores.google.genai_extension.get_document",
"llama_index.vector_stores.google.genai_extension.delete_document",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.vector_stores.google.genai_extension.set_config",
"llama_index.vector_stores.google.genai_extension.create_corpus"
] | [((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((2886, 2911), 'llama_index.vector_stores.google.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (2903, 2911), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4343, 4361), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4348, 4361), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4600, 4613), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4611, 4613), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((6413, 6446), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (6444, 6446), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8216, 8249), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (8247, 8249), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8326, 8417), 'llama_index.vector_stores.google.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (8346, 8417), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8451, 8494), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (8477, 8494), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10566, 10613), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (10570, 10613), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12317, 12364), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (12321, 12364), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12373, 12464), 'llama_index.vector_stores.google.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (12395, 12464), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((13968, 14015), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13972, 14015), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((16832, 16871), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (16858, 16871), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((6458, 6511), 'llama_index.vector_stores.google.genai_extension.get_corpus', 'genaix.get_corpus', ([], 
{'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (6475, 6511), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10819, 10908), 'llama_index.vector_stores.google.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (10838, 10908), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((17496, 17536), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (17511, 17536), False, 'from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((8291, 8303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8301, 8303), False, 'import uuid\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.indices.query.schema.QueryBundle",
"llama_index.schema.TextNode",
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
Response's `metadata` may also have have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.indices.query.schema.QueryBundle",
"llama_index.schema.TextNode",
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""
Astra DB Vector store index.
An index based on a DB table with vector search capabilities,
powered by the astrapy library
"""
import json
import logging
from typing import Any, Dict, List, Optional, cast
from warnings import warn
import llama_index.core
from llama_index.core.bridge.pydantic import PrivateAttr
from astrapy.db import AstraDB
from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
DEFAULT_MMR_PREFETCH_FACTOR = 4.0
MAX_INSERT_BATCH_SIZE = 20
NON_INDEXED_FIELDS = ["metadata._node_content", "content"]
class AstraDBVectorStore(BasePydanticVectorStore):
"""
Astra DB Vector Store.
    An abstraction of an Astra table with
vector-similarity-search. Documents, and their embeddings, are stored
in an Astra table and a vector-capable index is used for searches.
The table does not need to exist beforehand: if necessary it will
be created behind the scenes.
All Astra operations are done through the astrapy library.
Args:
        collection_name (str): collection name to use. If it does not exist, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): length of the embedding vectors in use.
        namespace (Optional[str]): The namespace to use. If not provided, 'default_keyspace' is used.
ttl_seconds (Optional[int]): expiration time for inserted entries.
Default is no expiration.
"""
stores_text: bool = True
flat_metadata: bool = True
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_astra_db: Any = PrivateAttr()
_astra_db_collection: Any = PrivateAttr()
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> None:
super().__init__()
# Set all the required class parameters
self._embedding_dimension = embedding_dimension
self._ttl_seconds = ttl_seconds
_logger.debug("Creating the Astra DB table")
# Build the Astra DB object
self._astra_db = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
from astrapy.api import APIRequestError
try:
# Create and connect to the newly created collection
self._astra_db_collection = self._astra_db.create_collection(
collection_name=collection_name,
dimension=embedding_dimension,
options={"indexing": {"deny": NON_INDEXED_FIELDS}},
)
except APIRequestError:
# possibly the collection is preexisting and has legacy
# indexing settings: verify
get_coll_response = self._astra_db.get_collections(
options={"explain": True}
)
collections = (get_coll_response["status"] or {}).get("collections") or []
preexisting = [
collection
for collection in collections
if collection["name"] == collection_name
]
if preexisting:
pre_collection = preexisting[0]
# if it has no "indexing", it is a legacy collection;
# otherwise it's unexpected warn and proceed at user's risk
pre_col_options = pre_collection.get("options") or {}
if "indexing" not in pre_col_options:
warn(
(
f"Collection '{collection_name}' is detected as "
"having indexing turned on for all fields "
"(either created manually or by older versions "
"of this plugin). This implies stricter "
"limitations on the amount of text"
" each entry can store. Consider reindexing anew on a"
" fresh collection to be able to store longer texts."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
options_json = json.dumps(pre_col_options["indexing"])
warn(
(
f"Collection '{collection_name}' has unexpected 'indexing'"
f" settings (options.indexing = {options_json})."
" This can result in odd behaviour when running "
" metadata filtering and/or unwarranted limitations"
" on storing long texts. Consider reindexing anew on a"
" fresh collection."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
# other exception
raise
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of node with embeddings
"""
# Initialize list of objects to track
nodes_list = []
# Process each node individually
for node in nodes:
# Get the metadata
metadata = node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
)
# One dictionary of node data per node
nodes_list.append(
{
"_id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
"$vector": node.get_embedding(),
}
)
# Log the number of rows being added
_logger.debug(f"Adding {len(nodes_list)} rows to table")
# Initialize an empty list to hold the batches
batched_list = []
# Iterate over the node_list in steps of MAX_INSERT_BATCH_SIZE
for i in range(0, len(nodes_list), MAX_INSERT_BATCH_SIZE):
# Append a slice of node_list to the batched_list
batched_list.append(nodes_list[i : i + MAX_INSERT_BATCH_SIZE])
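        # Illustrative arithmetic only: 45 nodes with MAX_INSERT_BATCH_SIZE of 20
        # produce three batches of sizes 20, 20 and 5.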
# Perform the bulk insert
for i, batch in enumerate(batched_list):
_logger.debug(f"Processing batch #{i + 1} of size {len(batch)}")
# Go to astrapy to perform the bulk insert
self._astra_db_collection.insert_many(batch)
# Return the list of ids
return [str(n["_id"]) for n in nodes_list]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes using ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
_logger.debug("Deleting a document from the Astra table")
self._astra_db_collection.delete(id=ref_doc_id, **delete_kwargs)
@property
def client(self) -> Any:
"""Return the underlying Astra vector table object."""
return self._astra_db_collection
@staticmethod
def _query_filters_to_dict(query_filters: MetadataFilters) -> Dict[str, Any]:
# Allow only legacy ExactMatchFilter and MetadataFilter with FilterOperator.EQ
if not all(
(
isinstance(f, ExactMatchFilter)
or (isinstance(f, MetadataFilter) and f.operator == FilterOperator.EQ)
)
for f in query_filters.filters
):
raise NotImplementedError(
"Only filters with operator=FilterOperator.EQ are supported"
)
return {f"metadata.{f.key}": f.value for f in query_filters.filters}
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
# Get the currently available query modes
_available_query_modes = [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.MMR,
]
# Reject query if not available
if query.mode not in _available_query_modes:
raise NotImplementedError(f"Query mode {query.mode} not available.")
# Get the query embedding
query_embedding = cast(List[float], query.query_embedding)
# Process the metadata filters as needed
if query.filters is not None:
query_metadata = self._query_filters_to_dict(query.filters)
else:
query_metadata = {}
# Get the scores depending on the query mode
if query.mode == VectorStoreQueryMode.DEFAULT:
# Call the vector_find method of AstraPy
matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=query.similarity_top_k,
filter=query_metadata,
)
# Get the scores associated with each
top_k_scores = [match["$similarity"] for match in matches]
elif query.mode == VectorStoreQueryMode.MMR:
# Querying a larger number of vectors and then doing MMR on them.
if (
kwargs.get("mmr_prefetch_factor") is not None
and kwargs.get("mmr_prefetch_k") is not None
):
raise ValueError(
"'mmr_prefetch_factor' and 'mmr_prefetch_k' "
"cannot coexist in a call to query()"
)
else:
if kwargs.get("mmr_prefetch_k") is not None:
prefetch_k0 = int(kwargs["mmr_prefetch_k"])
else:
prefetch_k0 = int(
query.similarity_top_k
* kwargs.get("mmr_prefetch_factor", DEFAULT_MMR_PREFETCH_FACTOR)
)
# Get the most we can possibly need to fetch
prefetch_k = max(prefetch_k0, query.similarity_top_k)
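            # Worked example (numbers illustrative): similarity_top_k=3 with the
            # default DEFAULT_MMR_PREFETCH_FACTOR of 4.0 gives prefetch_k0 = 12,
            # so prefetch_k = max(12, 3) = 12 vectors are prefetched before MMR.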
# Call AstraPy to fetch them
prefetch_matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=prefetch_k,
filter=query_metadata,
)
# Get the MMR threshold
mmr_threshold = query.mmr_threshold or kwargs.get("mmr_threshold")
# If we have found documents, we can proceed
if prefetch_matches:
zipped_indices, zipped_embeddings = zip(
*enumerate(match["$vector"] for match in prefetch_matches)
)
pf_match_indices, pf_match_embeddings = list(zipped_indices), list(
zipped_embeddings
)
else:
pf_match_indices, pf_match_embeddings = [], []
# Call the Llama utility function to get the top k
mmr_similarities, mmr_indices = get_top_k_mmr_embeddings(
query_embedding,
pf_match_embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=pf_match_indices,
mmr_threshold=mmr_threshold,
)
# Finally, build the final results based on the mmr values
matches = [prefetch_matches[mmr_index] for mmr_index in mmr_indices]
top_k_scores = mmr_similarities
# We have three lists to return
top_k_nodes = []
top_k_ids = []
# Get every match
for match in matches:
# Check whether we have a llama-generated node content field
if "_node_content" not in match["metadata"]:
match["metadata"]["_node_content"] = json.dumps(match)
# Create a new node object from the node metadata
node = metadata_dict_to_node(match["metadata"], text=match["content"])
# Append to the respective lists
top_k_nodes.append(node)
top_k_ids.append(match["_id"])
# return our final result
return VectorStoreQueryResult(
nodes=top_k_nodes,
similarities=top_k_scores,
ids=top_k_ids,
)
| [
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.utils.node_to_metadata_dict",
"llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.vector_stores.types.VectorStoreQueryResult"
] | [((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, 2131), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2153, 2166), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2164, 2166), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2199, 2212), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2210, 2212), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((9525, 9565), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (9529, 9565), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((13259, 13347), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'top_k_nodes', 'similarities': 'top_k_scores', 'ids': 'top_k_ids'}), '(nodes=top_k_nodes, similarities=top_k_scores, ids=\n top_k_ids)\n', (13281, 13347), False, 'from llama_index.core.vector_stores.types import BasePydanticVectorStore, ExactMatchFilter, FilterOperator, MetadataFilter, MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6510, 6589), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (6531, 6589), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13019, 13082), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (["match['metadata']"], {'text': "match['content']"}), "(match['metadata'], text=match['content'])\n", (13040, 13082), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12133, 12305), 'llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings', 'get_top_k_mmr_embeddings', (['query_embedding', 'pf_match_embeddings'], {'similarity_top_k': 'query.similarity_top_k', 'embedding_ids': 'pf_match_indices', 'mmr_threshold': 'mmr_threshold'}), '(query_embedding, pf_match_embeddings,\n similarity_top_k=query.similarity_top_k, embedding_ids=pf_match_indices,\n mmr_threshold=mmr_threshold)\n', (12157, 12305), False, 'from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings\n'), ((12919, 12936), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12929, 12936), False, 'import json\n'), ((4284, 4638), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. Consider reindexing anew on a fresh collection to be able to store longer texts."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. 
Consider reindexing anew on a fresh collection to be able to store longer texts."\n , UserWarning, stacklevel=2)\n', (4288, 4638), False, 'from warnings import warn\n'), ((5177, 5216), 'json.dumps', 'json.dumps', (["pre_col_options['indexing']"], {}), "(pre_col_options['indexing'])\n", (5187, 5216), False, 'import json\n'), ((5237, 5553), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."\n , UserWarning, stacklevel=2)\n', (5241, 5553), False, 'from warnings import warn\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel # type: ignore
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.query.schema import QueryBundle
from llama_index.core.llms.mock import MockLLM
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.core.types import RESPONSE_TEXT_TYPE
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
llm=MockLLM(),
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.vector_stores.google.genai_extension.build_generative_service",
"llama_index.core.schema.TextNode",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.base.response.schema.Response",
"llama_index.core.indices.query.schema.QueryBundle"
] | [((1057, 1084), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1074, 1084), False, 'import logging\n'), ((2707, 2740), 'llama_index.vector_stores.google.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2738, 2740), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4779, 4828), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4783, 4828), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6722, 6748), 'llama_index.core.base.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6730, 6748), False, 'from llama_index.core.base.response.schema import Response\n'), ((6805, 6833), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6816, 6833), False, 'from llama_index.core.indices.query.schema import QueryBundle\n'), ((2596, 2605), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {}), '()\n', (2603, 2605), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((8244, 8266), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8252, 8266), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, TextNode\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.legacy.schema.Document",
"llama_index.legacy.indices.managed.google.generativeai.set_google_config"
] | [((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (759, 798), False, 'from unittest.mock import MagicMock, patch\n'), ((1012, 1071), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1030, 1071), False, 'import pytest\n'), ((1073, 1144), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1078, 1144), False, 'from unittest.mock import MagicMock, patch\n'), ((1402, 1461), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1420, 1461), False, 'import pytest\n'), ((1463, 1537), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1468, 1537), False, 'from unittest.mock import MagicMock, patch\n'), ((2137, 2196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2155, 2196), False, 'import pytest\n'), ((2198, 2272), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2203, 2272), False, 'from unittest.mock import MagicMock, patch\n'), ((2274, 2350), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2279, 2350), False, 'from unittest.mock import MagicMock, patch\n'), ((2352, 2437), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2357, 2437), False, 'from unittest.mock import MagicMock, patch\n'), ((2434, 2507), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2439, 2507), False, 'from unittest.mock import MagicMock, patch\n'), ((4398, 4457), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4416, 4457), False, 'import pytest\n'), ((4459, 4532), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4464, 4532), False, 'from unittest.mock import MagicMock, patch\n'), ((4534, 4611), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4539, 4611), False, 'from unittest.mock import MagicMock, patch\n'), ((4613, 4684), 'unittest.mock.patch', 'patch', 
(['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4618, 4684), False, 'from unittest.mock import MagicMock, patch\n'), ((570, 671), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (587, 671), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((868, 920), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (885, 920), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((934, 953), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (951, 953), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1252, 1284), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1264, 1284), True, 'import google.ai.generativelanguage as genai\n'), ((1308, 1348), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1331, 1348), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1805, 1862), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1830, 1862), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2913, 2940), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2936, 2940), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3037, 3085), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3051, 3085), True, 'import google.ai.generativelanguage as genai\n'), ((4874, 4906), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4886, 4906), True, 'import google.ai.generativelanguage as genai\n'), ((6624, 6664), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6647, 6664), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3531, 3565), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3539, 3565), False, 'from llama_index.legacy.schema import Document\n'), ((3579, 3612), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3587, 3612), False, 'from llama_index.legacy.schema import Document\n'), ((3208, 3264), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': 
'"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3219, 3264), True, 'import google.ai.generativelanguage as genai\n'), ((3369, 3425), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3380, 3425), True, 'import google.ai.generativelanguage as genai\n'), ((5155, 5194), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5170, 5194), True, 'import google.ai.generativelanguage as genai\n'), ((5431, 5452), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5441, 5452), True, 'import google.ai.generativelanguage as genai\n'), ((5775, 5889), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5819, 5889), True, 'import google.ai.generativelanguage as genai\n'), ((6237, 6351), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6281, 6351), True, 'import google.ai.generativelanguage as genai\n'), ((5611, 5651), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5621, 5651), True, 'import google.ai.generativelanguage as genai\n'), ((6103, 6134), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6113, 6134), True, 'import google.ai.generativelanguage as genai\n')] |
from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
| [
"llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.response_synthesizers.google.generativeai.set_google_config",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config"
] | [((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (729, 768), False, 'from unittest.mock import MagicMock, patch\n'), ((982, 1041), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1000, 1041), False, 'import pytest\n'), ((1043, 1120), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (1048, 1120), False, 'from unittest.mock import MagicMock, patch\n'), ((3580, 3639), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (3598, 3639), False, 'import pytest\n'), ((3641, 3718), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (3646, 3718), False, 'from unittest.mock import MagicMock, patch\n'), ((6499, 6558), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (6517, 6558), False, 'import pytest\n'), ((6560, 6637), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (6565, 6637), False, 'from unittest.mock import MagicMock, patch\n'), ((7434, 7493), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7452, 7493), False, 'import pytest\n'), ((7495, 7572), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (7500, 7572), False, 'from unittest.mock import MagicMock, patch\n'), ((8355, 8414), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8373, 8414), False, 'import pytest\n'), ((8416, 8493), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (8421, 8493), False, 'from unittest.mock import MagicMock, patch\n'), ((9288, 9347), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (9306, 9347), False, 'import pytest\n'), ((9349, 9426), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (9354, 9426), False, 'from unittest.mock import MagicMock, patch\n'), ((540, 641), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 
'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (557, 641), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((838, 890), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (855, 890), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((904, 923), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (921, 923), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5137, 5174), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (5172, 5174), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7037, 7074), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (7072, 7074), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7965, 8002), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8000, 8002), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((8894, 8931), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8929, 8931), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((9819, 9856), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (9854, 9856), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((2786, 2804), 'pytest.approx', 'pytest.approx', (['(0.7)'], {}), '(0.7)\n', (2799, 2804), False, 'import pytest\n'), ((6163, 6181), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (6176, 6181), False, 'import pytest\n'), ((6348, 6366), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (6361, 6366), False, 'import pytest\n'), ((6477, 6495), 'pytest.approx', 'pytest.approx', (['(0.9)'], {}), '(0.9)\n', (6490, 6495), False, 'import pytest\n'), ((7084, 7108), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7097, 7108), False, 'import pytest\n'), ((8012, 8036), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8025, 8036), False, 'import pytest\n'), ((8941, 8965), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8954, 8965), False, 'import pytest\n'), ((9866, 9890), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9879, 9890), False, 'import pytest\n'), ((2253, 2413), 'google.ai.generativelanguage.SafetySetting', 'genai.SafetySetting', ([], {'category': 'genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 
'genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=genai.HarmCategory.\n HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai.SafetySetting.\n HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (2272, 2413), True, 'import google.ai.generativelanguage as genai\n'), ((6860, 6883), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (6873, 6883), True, 'import google.ai.generativelanguage as genai\n'), ((7792, 7815), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (7805, 7815), True, 'import google.ai.generativelanguage as genai\n'), ((8717, 8740), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (8730, 8740), True, 'import google.ai.generativelanguage as genai\n'), ((9647, 9670), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (9660, 9670), True, 'import google.ai.generativelanguage as genai\n'), ((5324, 5348), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (5332, 5348), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((5485, 5517), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""Additional node"""'}), "(text='Additional node')\n", (5493, 5517), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((7273, 7297), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (7281, 7297), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((8201, 8225), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (8209, 8225), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((9130, 9154), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (9138, 9154), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((10055, 10079), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (10063, 10079), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((1342, 1363), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (1352, 1363), True, 'import google.ai.generativelanguage as genai\n'), ((3938, 3959), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (3948, 3959), True, 'import google.ai.generativelanguage as genai\n'), ((1687, 1801), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/789"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/789', part_index=0)\n", (1731, 1801), True, 'import google.ai.generativelanguage as genai\n'), ((4282, 4396), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (4326, 4396), True, 'import google.ai.generativelanguage as genai\n'), ((4744, 4858), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': 
'"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (4788, 4858), True, 'import google.ai.generativelanguage as genai\n'), ((1522, 1563), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42."""'}), "(text='Meaning of life is 42.')\n", (1532, 1563), True, 'import google.ai.generativelanguage as genai\n'), ((4118, 4158), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (4128, 4158), True, 'import google.ai.generativelanguage as genai\n'), ((4610, 4641), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (4620, 4641), True, 'import google.ai.generativelanguage as genai\n')] |
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class ComposableGraphQueryEngine(BaseQueryEngine):
"""Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = callback_manager_from_settings_or_context(
Settings, self._graph.service_context
)
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
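# A minimal usage sketch (editor's addition, not part of the original module):
# `graph` is assumed to be a ComposableGraph built elsewhere, and the
# `similarity_top_k` kwarg is illustrative rather than required.
def _example_compose_query(graph: ComposableGraph, query: str) -> RESPONSE_TYPE:
    """Attach a custom engine to the root index, then run a recursive query."""
    custom_engines: Dict[str, BaseQueryEngine] = {
        graph.root_id: graph.get_index(graph.root_id).as_query_engine(similarity_top_k=3)
    }
    engine = ComposableGraphQueryEngine(
        graph,
        custom_query_engines=custom_engines,
        recursive=True,
    )
    return engine.query(query)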
| [
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.schema.NodeWithScore"
] | [((585, 620), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (610, 620), True, 'import llama_index.core.instrumentation as instrument\n'), ((1649, 1734), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'self._graph.service_context'], {}), '(Settings, self._graph.service_context\n )\n', (1690, 1734), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((4741, 4798), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (4754, 4798), False, 'from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode\n')] |
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
import llama_index.llms.llama_cpp
from langchain.embeddings import HuggingFaceEmbeddings
import config
llm = llama_index.llms.llama_cpp.LlamaCPP(
model_kwargs={"n_gpu_layers": 1},
)
embed_model = HuggingFaceEmbeddings(model_name=config.EMBEDDING_MODEL_URL)
# create a service context
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
# load documents
documents = SimpleDirectoryReader(
config.KNOWLEDGE_BASE_PATH
).load_data()
# create vector store index
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# ================== Querying ================== #
# set up query engine
query_engine = index.as_query_engine()
# query_engine = index.as_query_engine()
response = query_engine.query("Who are the authors of this paper?")
print(response) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((431, 491), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'config.EMBEDDING_MODEL_URL'}), '(model_name=config.EMBEDDING_MODEL_URL)\n', (452, 491), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((538, 600), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (566, 600), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((747, 822), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (778, 822), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((642, 691), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['config.KNOWLEDGE_BASE_PATH'], {}), '(config.KNOWLEDGE_BASE_PATH)\n', (663, 691), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n')] |
import time
import llama_index
from atlassian import Bitbucket
import os
import sys
sys.path.append('../')
import local_secrets as secrets
start_time = time.time()
stash = Bitbucket('https://git.techstyle.net', token=secrets.stash_token)
os.environ['OPENAI_API_KEY'] = secrets.techstyle_openai_key
project ='DATASICENCE'
repo = stash.get_repo(project, 'brand-analytics')
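# NOTE: the single-repo lookup above is unused; the for-loop below rebinds "repo" for each repository in the project.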
length_cutoff = 100000
for repo in stash.repo_list(project):
count = 0
repo_slug = repo['slug']
files = stash.get_file_list(project, repo_slug)
index = llama_index.GPTSimpleVectorIndex([])
index_file = f'./stash_index/{project}_{repo_slug}.json'
if os.path.isfile(index_file):
continue
for file in files:
if file[-3:] not in ['.py']:
continue
try:
count = count + 1
url = f"https://git.techstyle.net/projects/{project}/repos/{repo_slug}/browse/{file}"
code = str(stash.get_content_of_file(project, repo_slug, file))
code = code[2:len(code)-1].replace("\\n", '\n')
print(file, len(code))
if len(code) > length_cutoff:
print(f'{repo_slug} {file} size {len(code)}, truncating')
code = code[0:length_cutoff]
content = f"Stash Project: {project}\nStash Repository: {repo_slug}\nStash URL: {url}\nStash Code:\n {code}"
index.insert(llama_index.Document(content))
except Exception as e:
print(f'Error {e} on {repo_slug} {file}')
index.save_to_disk(index_file)
print(f'Done, {count} files in repo {repo_slug} saved to index in {round(time.time() - start_time, 0)} seconds.')
# projects = stash.project_list()
# for project in projects:
# print(project['key'])
# repos = stash.repo_list('DataScience')
# for repo in repos:
# print(repo['slug'])
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.Document"
] | [((84, 106), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (99, 106), False, 'import sys\n'), ((153, 164), 'time.time', 'time.time', ([], {}), '()\n', (162, 164), False, 'import time\n'), ((173, 238), 'atlassian.Bitbucket', 'Bitbucket', (['"""https://git.techstyle.net"""'], {'token': 'secrets.stash_token'}), "('https://git.techstyle.net', token=secrets.stash_token)\n", (182, 238), False, 'from atlassian import Bitbucket\n'), ((540, 576), 'llama_index.GPTSimpleVectorIndex', 'llama_index.GPTSimpleVectorIndex', (['[]'], {}), '([])\n', (572, 576), False, 'import llama_index\n'), ((645, 671), 'os.path.isfile', 'os.path.isfile', (['index_file'], {}), '(index_file)\n', (659, 671), False, 'import os\n'), ((1390, 1419), 'llama_index.Document', 'llama_index.Document', (['content'], {}), '(content)\n', (1410, 1419), False, 'import llama_index\n'), ((1618, 1629), 'time.time', 'time.time', ([], {}), '()\n', (1627, 1629), False, 'import time\n')] |
import json
from typing import Dict, List
import llama_index.query_engine
from llama_index import ServiceContext, QueryBundle
from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager
from llama_index.indices.base import BaseIndex
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.llms.base import LLM
from llama_index.prompts.mixin import PromptMixinType
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.selectors import LLMSingleSelector
from llama_index.tools import QueryEngineTool
from common.config import DEBUG, LLM_CACHE_ENABLED
from common.llm import llm_predict, create_llm
from common.prompt import CH_SINGLE_SELECT_PROMPT_TMPL
from common.utils import ObjectEncoder
from query.query_engine import load_indices
from query.compose import create_compose_query_engine
class EchoNameEngine(BaseQueryEngine):
def __init__(self, name: str, callback_manager: CallbackManager = None):
self.name = name
super().__init__(callback_manager)
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
        return Response(f"我是{self.name}")  # i.e. "I am {self.name}"
class LlmQueryEngine(BaseQueryEngine):
def __init__(self, llm: LLM, callback_manager: CallbackManager):
self.llm = llm
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(llm_predict(self.llm, query_bundle.query_str))
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def create_route_query_engine(query_engines: List[BaseQueryEngine], descriptions: List[str],
service_context: ServiceContext = None):
assert len(query_engines) == len(descriptions)
tools = []
for i, query_engine in enumerate(query_engines):
query_tool = QueryEngineTool.from_defaults(
query_engine=query_engine,
description=descriptions[i]
)
tools.append(query_tool)
return llama_index.query_engine.RouterQueryEngine(
selector=LLMSingleSelector.from_defaults(service_context=service_context,
prompt_template_str=CH_SINGLE_SELECT_PROMPT_TMPL),
service_context=service_context,
query_engine_tools=tools
)
class Chatter:
def __init__(self):
if DEBUG:
debug_handler = LlamaDebugHandler()
cb_manager = CallbackManager([debug_handler])
else:
debug_handler = None
cb_manager = CallbackManager()
llm = create_llm(cb_manager, LLM_CACHE_ENABLED)
service_context = ServiceContext.from_defaults(
llm=llm,
callback_manager=cb_manager
)
self.cb_manager = cb_manager
self.city_indices: Dict[str, List[BaseIndex]] = load_indices(service_context)
self.service_context = service_context
self.llm = llm
self.debug_handler = debug_handler
self.query_engine = self.create_query_engine()
def create_query_engine(self):
index_query_engine = create_compose_query_engine(self.city_indices, self.service_context)
        index_summary = f"提供 {', '.join(self.city_indices.keys())} 这几个城市的相关信息"  # "Provides information about these cities: ..."
        llm_query_engine = LlmQueryEngine(llm=self.llm, callback_manager=self.cb_manager)
        llm_summary = "提供其他所有信息"  # "Provides all other information"
route_query_engine = create_route_query_engine(
[index_query_engine, llm_query_engine],
[index_summary, llm_summary],
service_context=self.service_context)
return route_query_engine
def _print_and_flush_debug_info(self):
if self.debug_handler:
for event in self.debug_handler.get_events():
if event.event_type in (CBEventType.LLM, CBEventType.RETRIEVE):
print(
f"[DebugInfo] event_type={event.event_type}, content={json.dumps(event.payload, ensure_ascii=False, cls=ObjectEncoder)}")
self.debug_handler.flush_event_logs()
def chat(self, query):
response = self.query_engine.query(query)
self._print_and_flush_debug_info()
return response
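# Minimal usage sketch (editor's addition; an assumed entry point, not in the original file).
if __name__ == "__main__":
    chatter = Chatter()
    # The query text ("introduce Beijing") is purely illustrative.
    print(chatter.chat("介绍一下北京"))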
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.selectors.LLMSingleSelector.from_defaults",
"llama_index.tools.QueryEngineTool.from_defaults",
"llama_index.response.schema.Response",
"llama_index.callbacks.CallbackManager"
] | [((1288, 1314), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1296, 1314), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2113, 2203), 'llama_index.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine', 'description': 'descriptions[i]'}), '(query_engine=query_engine, description=\n descriptions[i])\n', (2142, 2203), False, 'from llama_index.tools import QueryEngineTool\n'), ((2853, 2894), 'common.llm.create_llm', 'create_llm', (['cb_manager', 'LLM_CACHE_ENABLED'], {}), '(cb_manager, LLM_CACHE_ENABLED)\n', (2863, 2894), False, 'from common.llm import llm_predict, create_llm\n'), ((2921, 2987), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'callback_manager': 'cb_manager'}), '(llm=llm, callback_manager=cb_manager)\n', (2949, 2987), False, 'from llama_index import ServiceContext, QueryBundle\n'), ((3115, 3144), 'query.query_engine.load_indices', 'load_indices', (['service_context'], {}), '(service_context)\n', (3127, 3144), False, 'from query.query_engine import load_indices\n'), ((3378, 3446), 'query.compose.create_compose_query_engine', 'create_compose_query_engine', (['self.city_indices', 'self.service_context'], {}), '(self.city_indices, self.service_context)\n', (3405, 3446), False, 'from query.compose import create_compose_query_engine\n'), ((1673, 1718), 'common.llm.llm_predict', 'llm_predict', (['self.llm', 'query_bundle.query_str'], {}), '(self.llm, query_bundle.query_str)\n', (1684, 1718), False, 'from common.llm import llm_predict, create_llm\n'), ((2338, 2456), 'llama_index.selectors.LLMSingleSelector.from_defaults', 'LLMSingleSelector.from_defaults', ([], {'service_context': 'service_context', 'prompt_template_str': 'CH_SINGLE_SELECT_PROMPT_TMPL'}), '(service_context=service_context,\n prompt_template_str=CH_SINGLE_SELECT_PROMPT_TMPL)\n', (2369, 2456), False, 'from llama_index.selectors import LLMSingleSelector\n'), ((2671, 2690), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {}), '()\n', (2688, 2690), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2716, 2748), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[debug_handler]'], {}), '([debug_handler])\n', (2731, 2748), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2821, 2838), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (2836, 2838), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((4202, 4266), 'json.dumps', 'json.dumps', (['event.payload'], {'ensure_ascii': '(False)', 'cls': 'ObjectEncoder'}), '(event.payload, ensure_ascii=False, cls=ObjectEncoder)\n', (4212, 4266), False, 'import json\n')] |
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index.vector_stores.qdrant import QdrantVectorStore
import llama_index
llama_index.set_global_handler("simple")
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mistral")
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("Does the author like web frameworks? Give details.")
print(response)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.set_global_handler",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.Ollama"
] | [((210, 250), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (240, 250), False, 'import llama_index\n'), ((294, 342), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (320, 342), False, 'import qdrant_client\n'), ((364, 422), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (381, 422), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((450, 473), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mistral"""'}), "(model='mistral')\n", (456, 473), False, 'from llama_index.llms import Ollama\n'), ((492, 550), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (520, 550), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((598, 696), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (632, 696), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import os
import glob
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.ollama import Ollama
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize
# MODEL = "mistral"
MODEL = "llama2"
# MODEL = "llama2:text" # Doesn't follow instructions.
# MODEL = "mistral:instruct"
# MODEL = "llama2:13b" # Crushes my Mac
DATA_DIR = "data"
SUMMARY_ROOT = "summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)
def saveText(path, text):
"Save the given text to a file at the specified path."
with open(path, "w") as f:
f.write(text)
def commentPaths(ticketNumber):
"Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
ticketDir = os.path.join(DATA_DIR, ticketNumber)
return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))
def summaryPath(ticketNumber):
"Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")
def totalSizeKB(paths):
"Returns the total size in kilobytes of the files specified by `paths`."
return sum(os.path.getsize(path) for path in paths) / 1024
def currentTime():
"Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
from datetime import datetime
now = datetime.now()
return now.strftime("%d/%m/%Y %H:%M:%S")
TIMEOUT_SEC = 600
print(f"Loading {MODEL}")
llm = Ollama(model=MODEL, request_timeout=TIMEOUT_SEC)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)
SUMMARY_PROMPT = """The following text is a series of messages from a PaperCut support ticket.
Summarise the whole conversation, including a list of participants and who they work for,
the problem or problems, the key events and date they occurred,
and the current status of the ticket. Include any log lines from the messages."""
def summariseTicket(ticketNumber):
"Summarizes the Zendesk ticket with the given `ticketNumber` and returns the summary text."
input_files = commentPaths(ticketNumber)
reader = SimpleDirectoryReader(input_files=input_files)
docs = reader.load_data()
texts = [doc.text for doc in docs]
return summarizer.get_response(SUMMARY_PROMPT, texts)
#
# Test case.
#
if __name__ == "__main__":
import time
print(f"MODEL={MODEL}")
ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
print(ticketNumbers)
for i, ticketNumber in enumerate(ticketNumbers):
paths = commentPaths(ticketNumber)
print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")
# ticketNumbers = ticketNumbers[:1]
t00 = time.time()
summaries = {}
durations = {}
commentCounts = {}
commentSizes = {}
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = len(commentPaths(ticketNumber))
commentSize = totalSizeKB(commentPaths(ticketNumber))
print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}",
flush=True)
if os.path.exists(summaryPath(ticketNumber)):
print(f"Skipping ticket {ticketNumber}", flush=True)
continue # Skip tickets that have already been summarised.
t0 = time.time()
summary = summariseTicket(ticketNumber)
duration = time.time() - t0
description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
print(f" {description}", flush=True)
with open(summaryPath(ticketNumber), "w") as f:
print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
print(summary, file=f)
summaries[ticketNumber] = summary
durations[ticketNumber] = duration
commentCounts[ticketNumber] = commentCount
commentSizes[ticketNumber] = commentSize
duration = time.time() - t00
print("====================^^^====================")
print(f"Duration: {duration:.2f} seconds")
for i, ticketNumber in enumerate(ticketNumbers):
commentCount = commentCounts[ticketNumber]
commentSize = totalSizeKB(commentPaths(ticketNumber))
duration = durations[ticketNumber]
print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.ollama.Ollama",
"llama_index.core.response_synthesizers.TreeSummarize",
"llama_index.core.ServiceContext.from_defaults"
] | [((525, 564), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (536, 564), False, 'import os\n'), ((1540, 1588), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': 'MODEL', 'request_timeout': 'TIMEOUT_SEC'}), '(model=MODEL, request_timeout=TIMEOUT_SEC)\n', (1546, 1588), False, 'from llama_index.llms.ollama import Ollama\n'), ((1607, 1665), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (1635, 1665), False, 'from llama_index.core import ServiceContext\n'), ((1679, 1740), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (1692, 1740), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((846, 882), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (858, 882), False, 'import os\n'), ((1082, 1130), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1094, 1130), False, 'import os\n'), ((1428, 1442), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1440, 1442), False, 'from datetime import datetime\n'), ((2263, 2309), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (2284, 2309), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((2967, 2978), 'time.time', 'time.time', ([], {}), '()\n', (2976, 2978), False, 'import time\n'), ((472, 505), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (484, 505), False, 'import os\n'), ((3583, 3594), 'time.time', 'time.time', ([], {}), '()\n', (3592, 3594), False, 'import time\n'), ((4236, 4247), 'time.time', 'time.time', ([], {}), '()\n', (4245, 4247), False, 'import time\n'), ((911, 943), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (923, 943), False, 'import os\n'), ((2553, 2575), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2569, 2575), False, 'import os\n'), ((3662, 3673), 'time.time', 'time.time', ([], {}), '()\n', (3671, 3673), False, 'import time\n'), ((1248, 1269), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1263, 1269), False, 'import os\n'), ((2598, 2625), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (2610, 2625), False, 'import os\n')] |
from io import BytesIO
from flask import Flask, jsonify
import os
# import tweepy
from dotenv import load_dotenv
from flask import request,jsonify
import snscrape.modules.twitter as snstwitter
from snscrape.modules.twitter import TwitterSearchScraper, TwitterSearchScraperMode
import requests
from goose3 import Goose
from wordcloud import WordCloud, STOPWORDS
import plotly.graph_objs as go
import json
import plotly
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import base64
import pandas as pd
# from flask import send_file
from flask import send_file
import datetime
import plotly.express as px
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import logging
import sys
from llama_index import GPTVectorStoreIndex, TwitterTweetReader
import os
import llama_index
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
os.environ['OPENAI_API_KEY']='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'
app = Flask(__name__)
twitterData = None
queryString = None
# print(type(twitterData))
load_dotenv()
print(os.getenv("HUGGINGFACE_API"))
@app.route('/')
def hello_geek():
return '<h1>Hello from Flask & Docker</h2>'
@app.route('/twitter')
def twitter():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = set([])
i=0
global twitterData
global queryString
print("Url: Twitter, data: ", twitterData)
print("Url: Twitter, query: ", queryString)
# if twitterData is None:
# twitterData = snstwitter.TwitterSearchScraper(query).get_items()
# queryString = query
# else:
# if queryString != query:
# twitterData = snstwitter.TwitterSearchScraper(query).get_items()
# queryString = query
# else:
# print(vars(twitterData))
# print("not scraping again")
# twitter_scraper = TwitterSearchScraper(query)
# twitterData = list(twitter_scraper.get_items(TwitterSearchScraperMode.TOP))
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
for tweet in twitterData:
print("looping through tweets")
print(vars(tweet))
likecount += tweet.likeCount
retweet += tweet.retweetCount + tweet.quoteCount
if(tweet.hashtags != None):
for h in tweet.hashtags:
hashtags.add(h)
i+= 1
if(i==200):
break
tweets = {"likecount":likecount,"retweet":retweet,"hashtags":list(hashtags),"count":i}
print(tweets)
return jsonify({'result':tweets})
@app.route('/xyz')
def xyz():
query = request.args['query']
tweets = []
for tweet in snstwitter.TwitterProfileScraper(query).get_items():
tweets.append(tweet.date)
return tweets
API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
headers = {"Authorization": "Bearer " + os.getenv('HUGGINGFACE_API') }
API_URL_PROP = "https://api-inference.huggingface.co/models/valurank/distilroberta-propaganda-2class"
API_URL_HATE = "https://api-inference.huggingface.co/models/IMSyPP/hate_speech_en"
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
def queryprop(payload):
response = requests.post(API_URL_PROP, headers=headers, json=payload)
return response.json()
def query_hate(payload):
response = requests.post(API_URL_HATE, headers=headers, json=payload)
return response.json()
@app.route('/sentiment')
def sentiment():
query = request.args['query']
retweet = 0
likecount = 0
hashtags = []
senti=[]
i=0
positive=0
negative=0
neutral=0
global twitterData
global queryString
print("Url: Sentiment, data: ", twitterData)
# if twitterData is None:
# twitterData = snstwitter.TwitterSearchScraper(query).get_items()
# queryString = query
# else:
# if queryString != query:
# twitterData = snstwitter.TwitterSearchScraper(query).get_items()
# queryString = query
twitterData = snstwitter.TwitterSearchScraper(query).get_items()
for tweet in twitterData:
if tweet.lang=="en":
i+=1
if(i==200):
break
sentence= tweet.rawContent
print(sentence)
sid_obj = SentimentIntensityAnalyzer()
            sentiment_dict = sid_obj.polarity_scores(sentence)  # vader expects a plain string, not a list
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
positive+=1
elif sentiment_dict['compound'] <= -0.05 :
negative+=1
else :
neutral+=1
senti={"positive":positive, "negative":negative, "neutral":neutral}
labels = list(senti.keys())
values = list(senti.values())
return {"labels":labels, "values":values}
@app.route('/sentiment_article')
def sentiment_article():
senti=[]
url = 'https://blogs.jayeshvp24.dev/dive-into-web-design'
goose = Goose()
articles = goose.extract(url)
sentence1 = articles.cleaned_text
sid_obj = SentimentIntensityAnalyzer()
    sentiment_dict = sid_obj.polarity_scores(sentence1)  # pass the text itself, not a list
print(sentiment_dict['neg']*100, "% Negative")
print(sentiment_dict['pos']*100, "% Positive")
print("Review Overall Analysis", end = " ")
if sentiment_dict['compound'] >= 0.05 :
senti.append("Positive")
elif sentiment_dict['compound'] <= -0.05 :
senti.append("Negative")
else :
senti.append("Neutral")
return jsonify({"result":senti})
@app.route('/article-sentiment')
def articleSentiment():
url = request.args['url']
# url = 'https://blogs.jayeshvp24.dev/dive-into-web-design'
goose = Goose()
articles = goose.extract(url)
sentence = articles.cleaned_text[0:500]
print(sentence)
output=query_hate({
"inputs": str(sentence)})
# print(output[0][0])
    # Map the model's LABEL_i outputs to readable hate-speech categories
    result = {}
for data in output[0]:
if data['label'] == "LABEL_0":
result["ACCEPTABLE"] = data['score']
elif data['label'] == "LABEL_1":
result["INAPPROAPRIATE"] = data['score']
elif data['label'] == "LABEL_2":
result["OFFENSIVE"] = data['score']
elif data['label'] == "LABEL_3":
result["VIOLENT"] = data['score']
labels = list(result.keys())
values = list(result.values())
# # Use `hole` to create a donut-like pie chart
# fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=.5)])
# # fig.show()
# graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# print(graphJSON)
# print(type(fig))
# return graphJSON
return jsonify({"labels": labels, "values": values})
@app.route('/summary')
def summary():
try:
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = query({
"inputs": articles.cleaned_text
})
print(output)
except:
return "Please put the relevant text article"
return jsonify({"result": output[0]['summary_text']})
@app.route('/wordcloud')
def plotly_wordwordcloud():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
text = articles.cleaned_text
wordcloud = WordCloud(width=1280, height=853, margin=0,
colormap='Blues').generate(text)
wordcloud.to_file("./wordcloud.png")
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.margins(x=0, y=0)
# plt.show()
# img = BytesIO()
# plt.savefig("./wordcloud.png", format='png')
# plt.imsave("./wordcloud.png", format='png')
# img.seek(0)
# # nimg = Image.frombytes("RGBA", (128, 128), img, 'raw')
# nimg = Image.frombuffer(img)
# nimg.save("./wordcloud.png")
# plot_url = base64.b64encode(img.getvalue()).decode('utf8')
return send_file("./wordcloud.png", mimetype='image/png')
# return render_template('plot.html', plot_url=plot_url)
# @app.route('/cloud')
# def plotly_wordcloud():
# url = 'https://blogs.jayeshvp24.dev/dive-into-web-design'
# goose = Goose()
# articles = goose.extract(url)
# text = query({
# "inputs": articles.cleaned_text
# })
# wc = WordCloud(stopwords = set(STOPWORDS),
# max_words = 200,
# max_font_size = 100)
# wc.generate(text[0]['summary_text'])
@app.route('/propaganda')
def propaganda():
url = request.args['url']
goose = Goose()
articles = goose.extract(url)
output = queryprop({
"inputs": articles.cleaned_text[0:600]
})
yes = output[0][0]['score']
no = 1 - yes
return jsonify({"yes": yes, "no": no})
@app.route("/chat", methods=["GET"])
def chat():
# Get the query from the request body.
query = request.args['url']
# create an app in https://developer.twitter.com/en/apps
# create reader, specify twitter handles
    reader = TwitterTweetReader(os.getenv('BEARER_TOKEN'))  # Twitter API bearer token from the environment
documents = reader.load_data(["ANI"])
# Create a new instance of the llama chatbot agent.
agent = llama_index.GPTVectorStoreIndex.from_documents(documents)
chat_engine = agent.as_chat_engine(verbose=True)
# Get the response from the llama chatbot agent.
response = chat_engine.chat(query)
# Return the response as JSON.
return jsonify({"response": response})
# @app.route('/cloud')
# def plotly_wordcloud():
# url = request.args['url']
# goose = Goose()
# articles = goose.extract(url)
# text = query({
# "inputs": articles.cleaned_text
# })
# wc = WordCloud(stopwords = set(STOPWORDS),
# max_words = 200,
# max_font_size = 100)
# wc.generate(text[0]['summary_text'])
# word_list=[]
# freq_list=[]
# fontsize_list=[]
# position_list=[]
# orientation_list=[]
# color_list=[]
# for (word, freq), fontsize, position, orientation, color in wc.layout_:
# word_list.append(word)
# freq_list.append(freq)
# fontsize_list.append(fontsize)
# position_list.append(position)
# orientation_list.append(orientation)
# color_list.append(color)
# # get the positions
# x=[]
# y=[]
# for i in position_list:
# x.append(i[0])
# y.append(i[1])
# # get the relative occurence frequencies
# new_freq_list = []
# for i in freq_list:
# new_freq_list.append(i*100)
# new_freq_list
# trace = go.Scatter(x=x,
# y=y,
# textfont = dict(size=new_freq_list,
# color=color_list),
# hoverinfo='text',
# hovertext=['{0}{1}'.format(w, f) for w, f in zip(word_list, freq_list)],
# mode='text',
# text=word_list
# )
# layout = go.Layout({'xaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False},
# 'yaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False}})
# fig = go.Figure(data=[trace], layout=layout)
# graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# print(graphJSON)
# print(type(fig))
# return graphJSON
@app.route('/authenticity')
def auth():
url = request.args['url']
    # Load the blacklist of known unreliable domains (MBFC column) and flag URLs that match
    lis = []
    df = pd.read_csv('blacklist.csv')
for i in range(len(df)):
lis.append(df.loc[i, "MBFC"])
for l in lis:
if(url.__contains__(l)):
return {"authentic":False}
return { "authentic": True }
@app.route('/bot-activity')
def botActivity():
url = request.args['url']
i=0
usernames = []
time = []
finalusername = []
for tweet in snstwitter.TwitterSearchScraper(url).get_items():
usernames.append(tweet.user.username)
time.append(tweet.date)
if(i==150):
break
i+=1
    # Heuristic: the scraper returns tweets newest-first, so consecutive tweets
    # posted less than a minute apart are treated as likely bot activity.
    flag = False
    for i in range(len(time)-1):
        a = time[i]
        b = time[i+1]
        c = a-b
        if(c.total_seconds() <= 60):
            finalusername.append(usernames[i+1])
    print("username: ", finalusername)
    if(len(finalusername) > 3):
        flag = True
return jsonify({"bots":list(set(finalusername)),"flag":flag})
#baseline model
if __name__ == '__main__':
app.run(debug=True)
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.TwitterTweetReader"
] | [((973, 988), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (978, 988), False, 'from flask import Flask, jsonify\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1078, 1106), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (1087, 1106), False, 'import os\n'), ((2573, 2600), 'flask.jsonify', 'jsonify', (["{'result': tweets}"], {}), "({'result': tweets})\n", (2580, 2600), False, 'from flask import request, jsonify\n'), ((3179, 3232), 'requests.post', 'requests.post', (['API_URL'], {'headers': 'headers', 'json': 'payload'}), '(API_URL, headers=headers, json=payload)\n', (3192, 3232), False, 'import requests\n'), ((3294, 3352), 'requests.post', 'requests.post', (['API_URL_PROP'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_PROP, headers=headers, json=payload)\n', (3307, 3352), False, 'import requests\n'), ((3415, 3473), 'requests.post', 'requests.post', (['API_URL_HATE'], {'headers': 'headers', 'json': 'payload'}), '(API_URL_HATE, headers=headers, json=payload)\n', (3428, 3473), False, 'import requests\n'), ((5203, 5210), 'goose3.Goose', 'Goose', ([], {}), '()\n', (5208, 5210), False, 'from goose3 import Goose\n'), ((5297, 5325), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (5323, 5325), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((5746, 5772), 'flask.jsonify', 'jsonify', (["{'result': senti}"], {}), "({'result': senti})\n", (5753, 5772), False, 'from flask import request, jsonify\n'), ((5939, 5946), 'goose3.Goose', 'Goose', ([], {}), '()\n', (5944, 5946), False, 'from goose3 import Goose\n'), ((6889, 6934), 'flask.jsonify', 'jsonify', (["{'labels': labels, 'values': values}"], {}), "({'labels': labels, 'values': values})\n", (6896, 6934), False, 'from flask import request, jsonify\n'), ((7274, 7320), 'flask.jsonify', 'jsonify', (["{'result': output[0]['summary_text']}"], {}), "({'result': output[0]['summary_text']})\n", (7281, 7320), False, 'from flask import request, jsonify\n'), ((7417, 7424), 'goose3.Goose', 'Goose', ([], {}), '()\n', (7422, 7424), False, 'from goose3 import Goose\n'), ((7652, 7699), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (7662, 7699), True, 'import matplotlib.pyplot as plt\n'), ((7704, 7719), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7712, 7719), True, 'import matplotlib.pyplot as plt\n'), ((7724, 7745), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)', 'y': '(0)'}), '(x=0, y=0)\n', (7735, 7745), True, 'import matplotlib.pyplot as plt\n'), ((8119, 8169), 'flask.send_file', 'send_file', (['"""./wordcloud.png"""'], {'mimetype': '"""image/png"""'}), "('./wordcloud.png', mimetype='image/png')\n", (8128, 8169), False, 'from flask import send_file\n'), ((8727, 8734), 'goose3.Goose', 'Goose', ([], {}), '()\n', (8732, 8734), False, 'from goose3 import Goose\n'), ((8907, 8938), 'flask.jsonify', 'jsonify', (["{'yes': yes, 'no': no}"], {}), "({'yes': yes, 'no': no})\n", (8914, 8938), False, 'from flask import request, jsonify\n'), ((9185, 9217), 'llama_index.TwitterTweetReader', 'TwitterTweetReader', (['BEARER_TOKEN'], {}), '(BEARER_TOKEN)\n', (9203, 9217), False, 'from llama_index import GPTVectorStoreIndex, TwitterTweetReader\n'), ((9328, 9385), 
'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (9374, 9385), False, 'import llama_index\n'), ((9579, 9610), 'flask.jsonify', 'jsonify', (["{'response': response}"], {}), "({'response': response})\n", (9586, 9610), False, 'from flask import request, jsonify\n'), ((11668, 11696), 'pandas.read_csv', 'pd.read_csv', (['"""blacklist.csv"""'], {}), "('blacklist.csv')\n", (11679, 11696), True, 'import pandas as pd\n'), ((2928, 2956), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API"""'], {}), "('HUGGINGFACE_API')\n", (2937, 2956), False, 'import os\n'), ((7051, 7058), 'goose3.Goose', 'Goose', ([], {}), '()\n', (7056, 7058), False, 'from goose3 import Goose\n'), ((2016, 2054), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (2047, 2054), True, 'import snscrape.modules.twitter as snstwitter\n'), ((2699, 2738), 'snscrape.modules.twitter.TwitterProfileScraper', 'snstwitter.TwitterProfileScraper', (['query'], {}), '(query)\n', (2731, 2738), True, 'import snscrape.modules.twitter as snstwitter\n'), ((4102, 4140), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['query'], {}), '(query)\n', (4133, 4140), True, 'import snscrape.modules.twitter as snstwitter\n'), ((4374, 4402), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (4400, 4402), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((7508, 7569), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(1280)', 'height': '(853)', 'margin': '(0)', 'colormap': '"""Blues"""'}), "(width=1280, height=853, margin=0, colormap='Blues')\n", (7517, 7569), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((12048, 12084), 'snscrape.modules.twitter.TwitterSearchScraper', 'snstwitter.TwitterSearchScraper', (['url'], {}), '(url)\n', (12079, 12084), True, 'import snscrape.modules.twitter as snstwitter\n')] |
## main function of AWS Lambda function
import llama_index
from llama_index import download_loader
import boto3
import json
import urllib.parse
from llama_index import SimpleDirectoryReader
def main(event, context):
# extracting s3 bucket and key information from SQS message
print(event)
s3_info = json.loads(event['Records'][0]['body'])
bucket_name = s3_info['Records'][0]['s3']['bucket']['name']
object_key = urllib.parse.unquote_plus(s3_info['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
        # the first approach to read the content of the uploaded file.
S3Reader = download_loader("S3Reader", custom_path='/tmp/llamahub_modules')
loader = S3Reader(bucket=bucket_name, key=object_key)
documents = loader.load_data()
# the second approach to read the content of uploaded file
# Creating an S3 client
# s3_client = boto3.client('s3')
# response = s3_client.get_object(Bucket=bucket_name, Key=object_key)
# file_content = response['Body'].read().decode('utf-8')
# save the file content to /tmp folder
# tmp_file_path = f"/tmp/{object_key.split('/')[-1]}"
# with open(tmp_file_path, "w") as f:
        #     f.write(file_content)
        # reader = SimpleDirectoryReader(input_files=[tmp_file_path])
# doc = reader.load_data()
# print(f"Loaded {len(doc)} doc")
## TODO
# ReIndex or Create New Index from document
        # Update or Insert into VectorDatabase
# (Optional) Update or Insert into DocStorage DB
# Update or Insert index to MongoDB
# Can have Ingestion Pipeline with Redis Cache
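        # A minimal sketch of the first TODO above (indexing the loaded documents),
        # kept as a comment because the vector-store and persistence targets are
        # not decided yet; the persist directory below is an assumption:
        #
        #   from llama_index import GPTVectorStoreIndex
        #   index = GPTVectorStoreIndex.from_documents(documents)
        #   index.storage_context.persist(persist_dir="/tmp/index_storage")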
return {
'statusCode': 200
}
# # creating an index
except Exception as e:
print(f"Error reading the file {object_key}: {str(e)}")
return {
'statusCode': 500,
'body': json.dumps('Error reading the file')
} | [
"llama_index.download_loader"
] | [((313, 352), 'json.loads', 'json.loads', (["event['Records'][0]['body']"], {}), "(event['Records'][0]['body'])\n", (323, 352), False, 'import json\n'), ((628, 692), 'llama_index.download_loader', 'download_loader', (['"""S3Reader"""'], {'custom_path': '"""/tmp/llamahub_modules"""'}), "('S3Reader', custom_path='/tmp/llamahub_modules')\n", (643, 692), False, 'from llama_index import download_loader\n'), ((1963, 1999), 'json.dumps', 'json.dumps', (['"""Error reading the file"""'], {}), "('Error reading the file')\n", (1973, 1999), False, 'import json\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index", "import llama_index"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
loader_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
use_gpt_index_import: If true, the loader files will use
llama_index as the base dependency. By default (False),
the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
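# Illustrative usage (not part of this module): download a LlamaHub module by
# class name and load it into memory. `GmailOpenAIAgentPack` is the example
# class named in the docstring above; any class listed in library.json works
# the same way.
#
#   GmailOpenAIAgentPack = download_llama_module("GmailOpenAIAgentPack")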
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |
import json
from typing import Dict, List
import llama_index.query_engine
from llama_index import ServiceContext, QueryBundle
from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager
from llama_index.indices.base import BaseIndex
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.llms.base import LLM
from llama_index.prompts.mixin import PromptMixinType
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.selectors import LLMSingleSelector
from llama_index.tools import QueryEngineTool
from common.config import DEBUG, LLM_CACHE_ENABLED
from common.llm import llm_predict, create_llm
from common.prompt import CH_SINGLE_SELECT_PROMPT_TMPL
from common.utils import ObjectEncoder
from query_todo.query_engine import load_indices
from query_todo.compose import create_compose_query_engine
class EchoNameEngine(BaseQueryEngine):
def __init__(self, name: str, callback_manager: CallbackManager = None):
self.name = name
super().__init__(callback_manager)
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(f"我是{self.name}")
class LlmQueryEngine(BaseQueryEngine):
def __init__(self, llm: LLM, callback_manager: CallbackManager):
self.llm = llm
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(llm_predict(self.llm, query_bundle.query_str))
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
pass
def create_route_query_engine(query_engines: List[BaseQueryEngine], descriptions: List[str],
service_context: ServiceContext = None):
assert len(query_engines) == len(descriptions)
# TODO
    # Create a RouterQueryEngine from the given query_engines and descriptions to route each query to the right engine
# https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
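    # A possible shape of the implementation (sketch only, not the reference
    # solution for this exercise); it relies on RouterQueryEngine from
    # llama_index.query_engine plus the QueryEngineTool/LLMSingleSelector
    # imports already present in this module:
    #
    #   tools = [
    #       QueryEngineTool.from_defaults(query_engine=engine, description=desc)
    #       for engine, desc in zip(query_engines, descriptions)
    #   ]
    #   return llama_index.query_engine.RouterQueryEngine(
    #       selector=LLMSingleSelector.from_defaults(),
    #       query_engine_tools=tools,
    #       service_context=service_context,
    #   )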
class Chatter:
def __init__(self):
if DEBUG:
debug_handler = LlamaDebugHandler()
cb_manager = CallbackManager([debug_handler])
else:
debug_handler = None
cb_manager = CallbackManager()
llm = create_llm(cb_manager, LLM_CACHE_ENABLED)
service_context = ServiceContext.from_defaults(
llm=llm,
callback_manager=cb_manager
)
self.cb_manager = cb_manager
self.city_indices: Dict[str, List[BaseIndex]] = load_indices(service_context)
self.service_context = service_context
self.llm = llm
self.debug_handler = debug_handler
self.query_engine = self.create_query_engine()
def create_query_engine(self):
index_query_engine = create_compose_query_engine(self.city_indices, self.service_context)
index_summary = f"提供 {', '.join(self.city_indices.keys())} 这几个城市的相关信息"
llm_query_engine = LlmQueryEngine(llm=self.llm, callback_manager=self.cb_manager)
llm_summary = f"提供其他所有信息"
        # Implement intent recognition: route each query to the appropriate query engine,
        # splitting traffic between chat and city-information lookup.
# https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
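        # Sketch only: the routing can be delegated to the create_route_query_engine()
        # helper defined above, e.g.
        #
        #   return create_route_query_engine(
        #       [index_query_engine, llm_query_engine],
        #       [index_summary, llm_summary],
        #       service_context=self.service_context,
        #   )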
def _print_and_flush_debug_info(self):
if self.debug_handler:
for event in self.debug_handler.get_events():
if event.event_type in (CBEventType.LLM, CBEventType.RETRIEVE):
print(
f"[DebugInfo] event_type={event.event_type}, content={json.dumps(event.payload, ensure_ascii=False, cls=ObjectEncoder)}")
self.debug_handler.flush_event_logs()
def chat(self, query):
response = self.query_engine.query(query)
self._print_and_flush_debug_info()
return response
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.response.schema.Response"
] | [((1298, 1324), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1306, 1324), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2530, 2571), 'common.llm.create_llm', 'create_llm', (['cb_manager', 'LLM_CACHE_ENABLED'], {}), '(cb_manager, LLM_CACHE_ENABLED)\n', (2540, 2571), False, 'from common.llm import llm_predict, create_llm\n'), ((2598, 2664), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'callback_manager': 'cb_manager'}), '(llm=llm, callback_manager=cb_manager)\n', (2626, 2664), False, 'from llama_index import ServiceContext, QueryBundle\n'), ((2792, 2821), 'query_todo.query_engine.load_indices', 'load_indices', (['service_context'], {}), '(service_context)\n', (2804, 2821), False, 'from query_todo.query_engine import load_indices\n'), ((3055, 3123), 'query_todo.compose.create_compose_query_engine', 'create_compose_query_engine', (['self.city_indices', 'self.service_context'], {}), '(self.city_indices, self.service_context)\n', (3082, 3123), False, 'from query_todo.compose import create_compose_query_engine\n'), ((1683, 1728), 'common.llm.llm_predict', 'llm_predict', (['self.llm', 'query_bundle.query_str'], {}), '(self.llm, query_bundle.query_str)\n', (1694, 1728), False, 'from common.llm import llm_predict, create_llm\n'), ((2348, 2367), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {}), '()\n', (2365, 2367), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2393, 2425), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[debug_handler]'], {}), '([debug_handler])\n', (2408, 2425), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2498, 2515), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (2513, 2515), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((3855, 3919), 'json.dumps', 'json.dumps', (['event.payload'], {'ensure_ascii': '(False)', 'cls': 'ObjectEncoder'}), '(event.payload, ensure_ascii=False, cls=ObjectEncoder)\n', (3865, 3919), False, 'import json\n')] |
import asyncio
import math
import numpy as np
import random
import tqdm
from functools import reduce
from typing import Any, List, Dict, Sequence, Union, Coroutine, Iterable
from llama_index.core.async_utils import asyncio_module
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.llms import LLM, CompletionResponse
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llama_dataset.base import CreatedBy, CreatedByType
from llama_index.core.llama_dataset.simple import (
LabelledSimpleDataset,
LabelledSimpleDataExample,
)
from llama_index.packs.diff_private_simple_dataset.templates import (
zero_shot_completion_template,
few_shot_completion_template,
single_example_template,
)
from llama_index.packs.diff_private_simple_dataset.privacy_mechanism import (
PrivacyMechanism,
)
from llama_index.packs.diff_private_simple_dataset.events import (
EmptyIntersectionEvent,
LLMEmptyResponseEvent,
SyntheticExampleEndEvent,
SyntheticExampleStartEvent,
)
from prv_accountant.privacy_random_variables import (
PoissonSubsampledGaussianMechanism,
PureDPMechanism,
)
from prv_accountant import PRVAccountant
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
STOP_TOKENS = {"<|endoftext|>", " END", "<|end|>"}
FALLBACK_SYNTHETIC_EXAMPLE = LabelledSimpleDataExample(
reference_label="FALLBACK",
text="DO NOT USE.",
text_by=CreatedBy(type=CreatedByType.HUMAN),
)
class PromptBundle(BaseModel):
instruction: str = Field(description="Instruction associated with underlying task.")
text_heading: str = Field(description="Heading used for text.")
label_heading: str = Field(description="Label heading used for label.")
def _batch(iterable, n=1) -> Iterable[Any]:
"""Return iterable batches of an iterable."""
length = len(iterable)
for ndx in range(0, length, n):
yield iterable[ndx : min(ndx + n, length)]
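# For example (illustrative): list(_batch([1, 2, 3, 4, 5], n=2)) yields
# [[1, 2], [3, 4], [5]].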
class DiffPrivateSimpleDatasetPack(BaseLlamaPack):
"""A pack for creating differentially private simple llama-dataset."""
def __init__(
self,
llm: LLM, # currently only supports OpenAI completion LLMs
tokenizer: Any,
prompt_bundle: PromptBundle,
simple_dataset: LabelledSimpleDataset,
batch_size: int = 5,
sleep_time_in_seconds: float = 0,
sephamore_counter_size: int = 1,
cache_checkpoints: bool = True,
show_progress: bool = True,
):
self.llm = llm
self.tokenizer = tokenizer
self.prompt_bundle = prompt_bundle
self.simple_dataset = simple_dataset
self._num_examples = len(self.simple_dataset.examples)
self.labels = list({el.reference_label for el in self.simple_dataset[:]})
self.sleep_time_in_seconds = sleep_time_in_seconds
self._semaphore = asyncio.Semaphore(sephamore_counter_size)
self.show_progress = show_progress
self.batch_size = batch_size
self.cache_checkpoints = cache_checkpoints
def sigma_to_eps(
self,
sigma: float,
mechanism: PrivacyMechanism,
size: int,
max_token_cnt: int,
max_self_compositions: int = 1000,
eps_error: float = 0.01,
delta_error: float = 1e-10,
) -> float:
"""Return the epsilon value given a sigma.
Args:
sigma (float): The parameter associated with noise mechanism.
mechanism (PrivacyMechanism): Noise mechanism.
size (int): Number of samples to be generated.
max_token_cnt (int): Number of tokens generated per sample.
max_self_compositions (int, optional): PRV algorithm parameter. Defaults to 1000.
eps_error (float, optional): PRV algorithm parameter. Defaults to 0.01.
delta_error (float, optional): PRV algorithm parameter. Defaults to 1e-10.
Returns:
float: The epsilon value.
"""
if max_token_cnt > max_self_compositions:
raise ValueError(
"`max_token_cnt` cannot be greater than `max_self_composition`."
)
sample_rate = size / self._num_examples
if mechanism == PrivacyMechanism.GAUSSIAN:
prv_0 = PoissonSubsampledGaussianMechanism(
noise_multiplier=sigma, sampling_probability=sample_rate
)
elif mechanism == PrivacyMechanism.EXPONENTIAL:
sigma_bar = math.log(1 + sample_rate * (math.exp(sigma) - 1))
prv_0 = PureDPMechanism(eps=sigma_bar)
else:
raise ValueError(
"Invalid value for mechanism entered."
" Please use either 'gaussian' or 'exponential'."
)
accountant = PRVAccountant(
prvs=[
prv_0,
],
max_self_compositions=[max_self_compositions],
eps_error=eps_error,
delta_error=delta_error,
)
_eps_low, eps_est, _eps_up = accountant.compute_epsilon(
delta=1 / self._num_examples, num_self_compositions=[max_token_cnt]
)
return eps_est
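    # Illustrative call (values are assumptions): estimate the epsilon spent when
    # generating 10 synthetic samples of at most 100 tokens each under the
    # Gaussian mechanism with sigma=0.5:
    #
    #   eps = pack.sigma_to_eps(
    #       sigma=0.5,
    #       mechanism=PrivacyMechanism.GAUSSIAN,
    #       size=10,
    #       max_token_cnt=100,
    #   )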
async def _async_worker(self, job: Coroutine) -> Any:
async with self._semaphore:
await asyncio.sleep(self.sleep_time_in_seconds)
return await job
@dispatcher.span
def _filter_dataset_by_label(self, label: str) -> LabelledSimpleDataset:
"""Filter simple_dataset by label."""
if label not in self.labels:
raise ValueError(
"There are no examples with `label` in the associated `simple_dataset`."
)
examples = [el for el in self.simple_dataset[:] if el.reference_label == label]
return LabelledSimpleDataset(examples=examples)
@dispatcher.span
def _split_dataset(
self,
dataset: LabelledSimpleDataset,
num_splits: int,
num_samples_per_split: int,
) -> List[LabelledSimpleDataset]:
"""Splits a dataset into a set of disjoint datasets with equal number of examples."""
indexes = list(range(len(dataset.examples)))
random.shuffle(indexes)
partitions = [indexes[i::num_splits] for i in range(num_splits)]
splits = []
for p in partitions:
sample = random.sample(p, num_samples_per_split)
if not len(sample) == num_samples_per_split:
raise ValueError(
"Not able to create disjoint sets with current values of `num_splits` and `num_samples_per_split`."
)
examples = [dataset.examples[ix] for ix in sample]
splits.append(LabelledSimpleDataset(examples=examples))
return splits
def _get_public_prompt(
self,
synthetic_example: str,
label: str,
) -> str:
"""Get completion prompt for token universe."""
return zero_shot_completion_template.format(
synthetic_text=synthetic_example,
label=label,
instruction=self.prompt_bundle.instruction,
label_heading=self.prompt_bundle.label_heading,
text_heading=self.prompt_bundle.text_heading,
)
def _get_private_prompt(
self,
split: LabelledSimpleDataset,
synthetic_example: str,
label: str,
) -> str:
"""Get prompt for completion endpoint."""
single_templates = [
single_example_template.format(
label_heading=self.prompt_bundle.label_heading,
text_heading=self.prompt_bundle.text_heading,
example_label=x.reference_label,
example_text=x.text,
)
for x in split.examples
]
few_shot_examples = reduce(lambda x, y: x + y, single_templates)
return few_shot_completion_template.format(
instruction=self.prompt_bundle.instruction,
label_heading=self.prompt_bundle.label_heading,
text_heading=self.prompt_bundle.text_heading,
few_shot_examples=few_shot_examples,
label=label,
synthetic_text=synthetic_example,
)
def _normalize(
self, split_probs: Dict[str, float], token_universe_proba: Dict[str, float]
) -> Dict[str, float]:
"""Normalize a probability distribution over tokens to become a valid probability distribution."""
scale = sum(proba for proba in split_probs.values())
if scale == 0:
# universe
dispatcher.event(
EmptyIntersectionEvent(
public_tokens=list(token_universe_proba),
private_tokens=list(split_probs),
)
)
split_probs = token_universe_proba # use public probas instead
scale = sum(proba for proba in split_probs.values())
return {token: proba / scale for token, proba in split_probs.items()}
def _extract_and_normalize_next_token_probas(
self, response: CompletionResponse, token_universe_probas: Dict[str, float]
) -> Dict[str, float]:
"""Extract and normalize LogProba from a CompletionResponse."""
try:
next_token_proba_distn = response.logprobs[0]
except IndexError:
dispatcher.event(LLMEmptyResponseEvent())
return token_universe_probas
except Exception as e:
raise ValueError(
"Something went wrong when trying to get LogProb from CompletionResponse."
)
split_probs = {t: 0 for t in token_universe_probas}
for el in next_token_proba_distn: # for immediate next token only
if el.token in split_probs:
split_probs[el.token] = np.exp(el.logprob)
return self._normalize(
split_probs, token_universe_probas
) # to make into a valid prob distribution
def _generate_noise(
self, sigma: float, size: int, mechanism: PrivacyMechanism
) -> float:
"""Generates noise that satisfies eps-delta differential privacy."""
noise_rng = np.random.RandomState()
if mechanism == PrivacyMechanism.GAUSSIAN:
return noise_rng.normal(0, sigma, size=size)
elif mechanism == PrivacyMechanism.LAPLACE:
return noise_rng.exponential(scale=sigma, size=size)
else:
raise ValueError("Value entered for `mechanism` is not supported.")
def _merge_probas(self, list_of_probas: List[Dict[str, float]]) -> Dict[str, float]:
"""Merges a set of probabillity distributions over a common token universe."""
scale = len(list_of_probas)
tokens = list_of_probas[0].keys()
merged_distribution = {}
for token in tokens:
merged_distribution[token] = sum(pr[token] / scale for pr in list_of_probas)
return merged_distribution
def _add_noise(
self, proba: Dict[str, float], noise_array=Sequence[float]
) -> Dict[str, float]:
"""Add noise to proba distribution."""
return {
token: proba + noise
for (token, proba), noise in zip(proba.items(), noise_array)
}
def _mode_of_distribution(self, proba: Dict[str, float]) -> str:
"""Returns the mode of a given probability distribution."""
return max(proba, key=proba.get)
@dispatcher.span
def generate_dp_synthetic_example(
self,
label: str,
t_max: int = 1,
sigma: float = 0.5,
num_splits: int = 5,
num_samples_per_split: int = 1,
) -> LabelledSimpleDataExample:
"""Generates a differentially private synthetic example."""
return asyncio.run(
self.agenerate_dp_synthetic_example(
label=label,
t_max=t_max,
sigma=sigma,
num_splits=num_splits,
num_samples_per_split=num_samples_per_split,
)
)
@dispatcher.span
async def agenerate_dp_synthetic_example(
self,
label: str,
t_max: int = 1,
sigma: float = 0.5,
num_splits: int = 5,
num_samples_per_split: int = 1,
) -> LabelledSimpleDataExample:
"""Generates a differentially private synthetic example."""
dispatcher.event(SyntheticExampleStartEvent())
synthetic_example = ""
iterator = range(1, t_max + 1)
if self.show_progress:
iterator = tqdm.tqdm(iterator, position=0, leave=True)
for _ in iterator:
token_universe_prompt = self._get_public_prompt(
synthetic_example=synthetic_example, label=label
)
try:
response = await self._async_worker(
self.llm.acomplete(token_universe_prompt)
)
token_universe_probas = {
el.token: np.exp(el.logprob)
for el in response.logprobs[0] # only for next immediate token
}
except IndexError as e:
continue # try again in next iteration
# filter dataset by label
filtered_simple_dataset = self._filter_dataset_by_label(label=label)
# split the private dataset
disjoint_splits = self._split_dataset(
dataset=filtered_simple_dataset,
num_splits=num_splits,
num_samples_per_split=num_samples_per_split,
)
# generate next token probability distributions per split
split_tasks = []
for split in disjoint_splits:
prompt = self._get_private_prompt(split, synthetic_example, label)
split_tasks.append(self._async_worker(self.llm.acomplete(prompt)))
split_responses: List[CompletionResponse] = await asyncio.gather(
*split_tasks
)
# get and normalized next-token probas per split
splits = [
self._extract_and_normalize_next_token_probas(
response, token_universe_probas
)
for response in split_responses
]
            # noisy aggregation
sigma_calib = np.sqrt(2) / num_splits * sigma
noise_array = self._generate_noise(
sigma=sigma_calib, size=len(token_universe_probas), mechanism="gaussian"
)
merged_probas = self._merge_probas(splits)
noisy_probs = self._add_noise(merged_probas, noise_array)
# next token
next_token = self._mode_of_distribution(noisy_probs)
if next_token in STOP_TOKENS:
break
else:
synthetic_example += next_token
# synthetic example remove [RESULT]
try:
synthetic_example = synthetic_example.split("[RESULT]")[-1].strip()
except Exception as e:
synthetic_example = synthetic_example
simple_example = LabelledSimpleDataExample(
reference_label=label,
text=synthetic_example,
text_by=CreatedBy(type=CreatedByType.AI, model_name=self.llm.model),
)
dispatcher.event(SyntheticExampleEndEvent())
return simple_example
@dispatcher.span
def run(
self,
sizes: Union[int, Dict[str, int]],
t_max: int = 1,
sigma: float = 0.5,
num_splits: int = 5,
num_samples_per_split: int = 1,
) -> LabelledSimpleDataset:
"""Main run method."""
if num_samples_per_split < 1:
raise ValueError(
"`num_samples_per_split` must be an integer greater than 1."
)
if isinstance(sizes, int):
sizes_dict = {d: sizes for d in self.labels}
elif isinstance(sizes, dict):
sizes_dict = sizes
else:
raise TypeError(
"Invalid type of `sizes`. Must be either an `int` or `dict`."
)
if not all(c in sizes_dict for c in self.labels):
raise ValueError("Not all labels have sizes.")
examples = []
for label in self.labels:
size = sizes_dict[label]
for _ in range(size):
example = self.generate_dp_synthetic_example(
label=label,
t_max=t_max,
sigma=sigma,
num_splits=num_splits,
num_samples_per_split=num_samples_per_split,
)
examples.append(example)
return LabelledSimpleDataset(examples=examples)
@dispatcher.span
async def arun(
self,
sizes: Dict[str, int],
t_max: int = 1,
sigma: float = 0.5,
num_splits: int = 5,
num_samples_per_split: int = 1,
) -> LabelledSimpleDataset:
"""Main async run method."""
if num_samples_per_split < 1:
raise ValueError(
"`num_samples_per_split` must be an integer greater than 1."
)
if isinstance(sizes, int):
sizes_dict = {d: sizes for d in self.labels}
elif isinstance(sizes, dict):
sizes_dict = sizes
else:
raise TypeError(
"Invalid type of `sizes`. Must be either an `int` or `dict`."
)
if not all(c in sizes_dict for c in self.labels):
raise ValueError("Not all labels have sizes.")
tasks = []
for label in self.labels:
size = sizes_dict[label]
for _ in range(size):
example_task = self.agenerate_dp_synthetic_example(
label=label,
t_max=t_max,
sigma=sigma,
num_splits=num_splits,
num_samples_per_split=num_samples_per_split,
)
tasks.append(example_task)
asyncio_runner = asyncio_module(self.show_progress)
# run in batch
examples = []
for batch in _batch(tasks, self.batch_size):
batch_examples = await asyncio_runner.gather(*batch)
examples += batch_examples
if self.cache_checkpoints:
checkpoint = LabelledSimpleDataset(examples=examples)
checkpoint.save_json("checkpoint.json")
return LabelledSimpleDataset(examples=examples)
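# Illustrative end-to-end usage (sketch; the llm, tokenizer and private
# simple_dataset below are assumed to exist and are not defined here):
#
#   pack = DiffPrivateSimpleDatasetPack(
#       llm=llm,                          # OpenAI completion LLM with logprobs enabled
#       tokenizer=tokenizer,
#       prompt_bundle=PromptBundle(
#           instruction="Write a new example that matches the given label.",
#           text_heading="Text",
#           label_heading="Label",
#       ),
#       simple_dataset=simple_dataset,    # LabelledSimpleDataset of private examples
#   )
#   synthetic_dataset = pack.run(sizes=1, t_max=10, sigma=0.5)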
| [
"llama_index.packs.diff_private_simple_dataset.events.SyntheticExampleEndEvent",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.bridge.pydantic.Field",
"llama_index.packs.diff_private_simple_dataset.templates.zero_shot_completion_template.format",
"llama_index.packs.diff_private_simple_dataset.templates.single_example_template.format",
"llama_index.packs.diff_private_simple_dataset.events.SyntheticExampleStartEvent",
"llama_index.packs.diff_private_simple_dataset.events.LLMEmptyResponseEvent",
"llama_index.core.llama_dataset.simple.LabelledSimpleDataset",
"llama_index.packs.diff_private_simple_dataset.templates.few_shot_completion_template.format",
"llama_index.core.async_utils.asyncio_module",
"llama_index.core.llama_dataset.base.CreatedBy"
] | [((1280, 1315), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1305, 1315), True, 'import llama_index.core.instrumentation as instrument\n'), ((1589, 1654), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction associated with underlying task."""'}), "(description='Instruction associated with underlying task.')\n", (1594, 1654), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1679, 1722), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Heading used for text."""'}), "(description='Heading used for text.')\n", (1684, 1722), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1748, 1798), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Label heading used for label."""'}), "(description='Label heading used for label.')\n", (1753, 1798), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1494, 1529), 'llama_index.core.llama_dataset.base.CreatedBy', 'CreatedBy', ([], {'type': 'CreatedByType.HUMAN'}), '(type=CreatedByType.HUMAN)\n', (1503, 1529), False, 'from llama_index.core.llama_dataset.base import CreatedBy, CreatedByType\n'), ((2917, 2958), 'asyncio.Semaphore', 'asyncio.Semaphore', (['sephamore_counter_size'], {}), '(sephamore_counter_size)\n', (2934, 2958), False, 'import asyncio\n'), ((4823, 4947), 'prv_accountant.PRVAccountant', 'PRVAccountant', ([], {'prvs': '[prv_0]', 'max_self_compositions': '[max_self_compositions]', 'eps_error': 'eps_error', 'delta_error': 'delta_error'}), '(prvs=[prv_0], max_self_compositions=[max_self_compositions],\n eps_error=eps_error, delta_error=delta_error)\n', (4836, 4947), False, 'from prv_accountant import PRVAccountant\n'), ((5814, 5854), 'llama_index.core.llama_dataset.simple.LabelledSimpleDataset', 'LabelledSimpleDataset', ([], {'examples': 'examples'}), '(examples=examples)\n', (5835, 5854), False, 'from llama_index.core.llama_dataset.simple import LabelledSimpleDataset, LabelledSimpleDataExample\n'), ((6209, 6232), 'random.shuffle', 'random.shuffle', (['indexes'], {}), '(indexes)\n', (6223, 6232), False, 'import random\n'), ((6978, 7213), 'llama_index.packs.diff_private_simple_dataset.templates.zero_shot_completion_template.format', 'zero_shot_completion_template.format', ([], {'synthetic_text': 'synthetic_example', 'label': 'label', 'instruction': 'self.prompt_bundle.instruction', 'label_heading': 'self.prompt_bundle.label_heading', 'text_heading': 'self.prompt_bundle.text_heading'}), '(synthetic_text=synthetic_example,\n label=label, instruction=self.prompt_bundle.instruction, label_heading=\n self.prompt_bundle.label_heading, text_heading=self.prompt_bundle.\n text_heading)\n', (7014, 7213), False, 'from llama_index.packs.diff_private_simple_dataset.templates import zero_shot_completion_template, few_shot_completion_template, single_example_template\n'), ((7843, 7887), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'single_templates'], {}), '(lambda x, y: x + y, single_templates)\n', (7849, 7887), False, 'from functools import reduce\n'), ((7903, 8174), 'llama_index.packs.diff_private_simple_dataset.templates.few_shot_completion_template.format', 'few_shot_completion_template.format', ([], {'instruction': 'self.prompt_bundle.instruction', 'label_heading': 'self.prompt_bundle.label_heading', 'text_heading': 'self.prompt_bundle.text_heading', 'few_shot_examples': 'few_shot_examples', 'label': 'label', 
'synthetic_text': 'synthetic_example'}), '(instruction=self.prompt_bundle.\n instruction, label_heading=self.prompt_bundle.label_heading,\n text_heading=self.prompt_bundle.text_heading, few_shot_examples=\n few_shot_examples, label=label, synthetic_text=synthetic_example)\n', (7938, 8174), False, 'from llama_index.packs.diff_private_simple_dataset.templates import zero_shot_completion_template, few_shot_completion_template, single_example_template\n'), ((10193, 10216), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (10214, 10216), True, 'import numpy as np\n'), ((16726, 16766), 'llama_index.core.llama_dataset.simple.LabelledSimpleDataset', 'LabelledSimpleDataset', ([], {'examples': 'examples'}), '(examples=examples)\n', (16747, 16766), False, 'from llama_index.core.llama_dataset.simple import LabelledSimpleDataset, LabelledSimpleDataExample\n'), ((18105, 18139), 'llama_index.core.async_utils.asyncio_module', 'asyncio_module', (['self.show_progress'], {}), '(self.show_progress)\n', (18119, 18139), False, 'from llama_index.core.async_utils import asyncio_module\n'), ((18524, 18564), 'llama_index.core.llama_dataset.simple.LabelledSimpleDataset', 'LabelledSimpleDataset', ([], {'examples': 'examples'}), '(examples=examples)\n', (18545, 18564), False, 'from llama_index.core.llama_dataset.simple import LabelledSimpleDataset, LabelledSimpleDataExample\n'), ((4319, 4415), 'prv_accountant.privacy_random_variables.PoissonSubsampledGaussianMechanism', 'PoissonSubsampledGaussianMechanism', ([], {'noise_multiplier': 'sigma', 'sampling_probability': 'sample_rate'}), '(noise_multiplier=sigma,\n sampling_probability=sample_rate)\n', (4353, 4415), False, 'from prv_accountant.privacy_random_variables import PoissonSubsampledGaussianMechanism, PureDPMechanism\n'), ((6376, 6415), 'random.sample', 'random.sample', (['p', 'num_samples_per_split'], {}), '(p, num_samples_per_split)\n', (6389, 6415), False, 'import random\n'), ((7510, 7697), 'llama_index.packs.diff_private_simple_dataset.templates.single_example_template.format', 'single_example_template.format', ([], {'label_heading': 'self.prompt_bundle.label_heading', 'text_heading': 'self.prompt_bundle.text_heading', 'example_label': 'x.reference_label', 'example_text': 'x.text'}), '(label_heading=self.prompt_bundle.\n label_heading, text_heading=self.prompt_bundle.text_heading,\n example_label=x.reference_label, example_text=x.text)\n', (7540, 7697), False, 'from llama_index.packs.diff_private_simple_dataset.templates import zero_shot_completion_template, few_shot_completion_template, single_example_template\n'), ((12411, 12439), 'llama_index.packs.diff_private_simple_dataset.events.SyntheticExampleStartEvent', 'SyntheticExampleStartEvent', ([], {}), '()\n', (12437, 12439), False, 'from llama_index.packs.diff_private_simple_dataset.events import EmptyIntersectionEvent, LLMEmptyResponseEvent, SyntheticExampleEndEvent, SyntheticExampleStartEvent\n'), ((12566, 12609), 'tqdm.tqdm', 'tqdm.tqdm', (['iterator'], {'position': '(0)', 'leave': '(True)'}), '(iterator, position=0, leave=True)\n', (12575, 12609), False, 'import tqdm\n'), ((15346, 15372), 'llama_index.packs.diff_private_simple_dataset.events.SyntheticExampleEndEvent', 'SyntheticExampleEndEvent', ([], {}), '()\n', (15370, 15372), False, 'from llama_index.packs.diff_private_simple_dataset.events import EmptyIntersectionEvent, LLMEmptyResponseEvent, SyntheticExampleEndEvent, SyntheticExampleStartEvent\n'), ((4592, 4622), 'prv_accountant.privacy_random_variables.PureDPMechanism', 
'PureDPMechanism', ([], {'eps': 'sigma_bar'}), '(eps=sigma_bar)\n', (4607, 4622), False, 'from prv_accountant.privacy_random_variables import PoissonSubsampledGaussianMechanism, PureDPMechanism\n'), ((5325, 5366), 'asyncio.sleep', 'asyncio.sleep', (['self.sleep_time_in_seconds'], {}), '(self.sleep_time_in_seconds)\n', (5338, 5366), False, 'import asyncio\n'), ((6734, 6774), 'llama_index.core.llama_dataset.simple.LabelledSimpleDataset', 'LabelledSimpleDataset', ([], {'examples': 'examples'}), '(examples=examples)\n', (6755, 6774), False, 'from llama_index.core.llama_dataset.simple import LabelledSimpleDataset, LabelledSimpleDataExample\n'), ((9837, 9855), 'numpy.exp', 'np.exp', (['el.logprob'], {}), '(el.logprob)\n', (9843, 9855), True, 'import numpy as np\n'), ((13959, 13987), 'asyncio.gather', 'asyncio.gather', (['*split_tasks'], {}), '(*split_tasks)\n', (13973, 13987), False, 'import asyncio\n'), ((15250, 15309), 'llama_index.core.llama_dataset.base.CreatedBy', 'CreatedBy', ([], {'type': 'CreatedByType.AI', 'model_name': 'self.llm.model'}), '(type=CreatedByType.AI, model_name=self.llm.model)\n', (15259, 15309), False, 'from llama_index.core.llama_dataset.base import CreatedBy, CreatedByType\n'), ((18411, 18451), 'llama_index.core.llama_dataset.simple.LabelledSimpleDataset', 'LabelledSimpleDataset', ([], {'examples': 'examples'}), '(examples=examples)\n', (18432, 18451), False, 'from llama_index.core.llama_dataset.simple import LabelledSimpleDataset, LabelledSimpleDataExample\n'), ((9389, 9412), 'llama_index.packs.diff_private_simple_dataset.events.LLMEmptyResponseEvent', 'LLMEmptyResponseEvent', ([], {}), '()\n', (9410, 9412), False, 'from llama_index.packs.diff_private_simple_dataset.events import EmptyIntersectionEvent, LLMEmptyResponseEvent, SyntheticExampleEndEvent, SyntheticExampleStartEvent\n'), ((13000, 13018), 'numpy.exp', 'np.exp', (['el.logprob'], {}), '(el.logprob)\n', (13006, 13018), True, 'import numpy as np\n'), ((14358, 14368), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14365, 14368), True, 'import numpy as np\n'), ((4550, 4565), 'math.exp', 'math.exp', (['sigma'], {}), '(sigma)\n', (4558, 4565), False, 'import math\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = "https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
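# Illustrative sketch (not part of the original module): how `get_module_info`
# is typically consulted. The cache directory and module class below are
# hypothetical examples, and a cache miss triggers a fetch of the remote
# library.json, so treat this as a sketch rather than an offline snippet.
def _example_get_module_info() -> None:
    """Sketch: resolve a module class name to its id and extra files."""
    info = get_module_info(
        local_dir_path="./llamahub_modules",  # hypothetical cache dir
        remote_dir_path=LLAMA_HUB_URL,
        module_class="SimpleWebPageReader",  # hypothetical loader class
        refresh_cache=False,
    )
    # Expected shape, e.g.: {"module_id": "web/simple_web", "extra_files": [...]}
    print(info["module_id"], info["extra_files"])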
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
    if use_gpt_index_import:
        # Legacy substitution from the gpt_index -> llama_index rename; the
        # search and replacement strings are now identical, so this branch is
        # effectively a no-op and remains only for backwards compatibility.
        basepy_raw_content = basepy_raw_content.replace(
            "import llama_index", "import llama_index"
        )
        basepy_raw_content = basepy_raw_content.replace(
            "from llama_index", "from llama_index"
        )
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
            # if the __init__.py file does not exist, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
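# Illustrative note (not part of the original module): after
# `download_module_and_reqs` runs for a module id such as `web/simple_web`
# (example id taken from the comment in `get_module_info`), the local cache
# directory should look roughly like this -- exact file names depend on the
# module's entry in library.json:
#
#   <local_dir_path>/
#       library.json              # cached copy of the remote library index
#       requirements.txt          # only if the module publishes one
#       web/simple_web/
#           base.py               # the downloaded module implementation
#           <extra files>         # e.g. utils.py, __init__.py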
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: Legacy flag left over from the gpt_index ->
            llama_index rename. Both settings now resolve imports to
            llama_index, so the flag has no practical effect and is kept only
            for backwards compatibility.
        disable_library_cache: If true, skip writing the fetched library.json
            to the local cache directory.
        override_path: If true, download the module files directly into the
            resolved directory instead of a per-module subdirectory.
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
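# Illustrative sketch (not part of the original module): a typical call to
# `download_llama_module`. `GmailOpenAIAgentPack` is the example class named in
# the docstring above; any other LlamaHub entry works the same way. The call
# downloads files and installs requirements, so it needs network access.
def _example_download_llama_module() -> None:
    """Sketch: fetch a pack class from LlamaHub and return it."""
    pack_cls = download_llama_module(
        "GmailOpenAIAgentPack",
        refresh_cache=False,
        custom_dir="gmail_pack",  # hypothetical download directory name
    )
    print(pack_cls)  # the downloaded class; instantiate with its own arguments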
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |