import streamlit as st
import pandas as pd
from langchain_text_splitters import TokenTextSplitter
from langchain.docstore.document import Document
from torch import cuda
from langchain_community.embeddings import HuggingFaceEmbeddings, HuggingFaceInferenceAPIEmbeddings
from langchain_community.vectorstores import Qdrant
device = 'cuda' if cuda.is_available() else 'cpu'

st.set_page_config(page_title="SEARCH IATI", layout='wide')
st.title("SEARCH IATI Database")
var = st.text_input("enter keyword")
title = var.replace(' ', '+')
def create_chunks(text):
    """Split a text into non-overlapping chunks of at most 500 tokens."""
    text_splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=0)
    texts = text_splitter.split_text(text)
    return texts
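
# Illustrative note (not from the original code): with chunk_size=500 and
# chunk_overlap=0, a ~1,200-token project description is split into three
# non-overlapping chunks (500 + 500 + ~200 tokens).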
def get_chunks():
    # Load the IATI CSV exports and join them on the project identifier.
    orgas_df = pd.read_csv("iati_files/project_orgas.csv")
    region_df = pd.read_csv("iati_files/project_region.csv")
    sector_df = pd.read_csv("iati_files/project_sector.csv")
    status_df = pd.read_csv("iati_files/project_status.csv")
    texts_df = pd.read_csv("iati_files/project_texts.csv")
    projects_df = pd.merge(orgas_df, region_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, sector_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, status_df, on='iati_id', how='inner')
    projects_df = pd.merge(projects_df, texts_df, on='iati_id', how='inner')

    # Keep only BMZ-funded projects and drop columns not needed downstream.
    giz_df = projects_df[projects_df.client.str.contains('bmz')].reset_index(drop=True)
    giz_df.drop(columns=['orga_abbreviation', 'client',
                         'orga_full_name', 'country',
                         'country_flag', 'crs_5_code', 'crs_3_code',
                         'sgd_pred_code'], inplace=True)

    # Chunk the concatenated title + description, then explode so each row holds one chunk.
    giz_df['text_size'] = giz_df.apply(lambda x: len((x['title_main'] + x['description_main']).split()), axis=1)
    giz_df['chunks'] = giz_df.apply(lambda x: create_chunks(x['title_main'] + x['description_main']), axis=1)
    giz_df = giz_df.explode(column=['chunks'], ignore_index=True)

    # Wrap each chunk in a LangChain Document, carrying the project fields as metadata.
    placeholder = []
    for i in range(len(giz_df)):
        placeholder.append(Document(page_content=giz_df.loc[i, 'chunks'],
                                    metadata={"iati_id": giz_df.loc[i, 'iati_id'],
                                              "iati_orga_id": giz_df.loc[i, 'iati_orga_id'],
                                              "country_name": str(giz_df.loc[i, 'country_name']),
                                              "crs_5_name": giz_df.loc[i, 'crs_5_name'],
                                              "crs_3_name": giz_df.loc[i, 'crs_3_name'],
                                              "sgd_pred_str": giz_df.loc[i, 'sgd_pred_str'],
                                              "status": giz_df.loc[i, 'status'],
                                              "title_main": giz_df.loc[i, 'title_main']}))
    return placeholder
def embed_chunks(chunks):
    # BGE-M3 embeddings, normalised so cosine similarity can be used for retrieval.
    embeddings = HuggingFaceEmbeddings(
        model_kwargs={'device': device},
        encode_kwargs={'normalize_embeddings': True},
        model_name='BAAI/bge-m3'
    )
    # placeholder for collection
    qdrant_collections = {}
    qdrant_collections['all'] = Qdrant.from_documents(
        chunks,
        embeddings,
        path="/data/local_qdrant",
        collection_name='all',
    )
    print(qdrant_collections)
    print("vector embeddings done")
    return qdrant_collections
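
# A minimal sketch (assumption, not part of the original app) of how the
# collection built above could be queried for the keyword entered in the text
# box. similarity_search is the standard LangChain vector-store method; the
# default k=5 and the rendered fields are illustrative choices.
def search_projects(qdrant_collections, query, k=5):
    results = qdrant_collections['all'].similarity_search(query, k=k)
    for doc in results:
        st.write(doc.metadata["title_main"])
        st.write(doc.page_content)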
chunks = get_chunks()
qdrant_col = embed_chunks(chunks)
button = st.button("search")
if button:
    st.write(chunks[0])
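
# Note (assumption, not in the original code): Streamlit re-runs this script on
# every interaction, so get_chunks() and embed_chunks() are repeated on each
# button click. Wrapping the build step in st.cache_resource would build the
# collection only once per session, e.g.:
#
# @st.cache_resource
# def build_collection():
#     return embed_chunks(get_chunks())
#
# qdrant_col = build_collection()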