File size: 2,880 Bytes
db82f0c
 
 
 
235c9aa
06820d3
d2c92c0
06820d3
 
 
 
 
db82f0c
 
 
 
 
 
 
 
 
 
 
a44cabe
db82f0c
 
 
4de0864
db82f0c
 
 
4de0864
db82f0c
 
 
 
4de0864
 
db82f0c
 
 
 
 
 
 
4de0864
 
db82f0c
 
 
 
 
 
 
 
 
 
4de0864
db82f0c
 
 
 
 
 
 
 
 
 
 
 
18f0b90
db82f0c
8446f7a
 
db82f0c
8446f7a
db82f0c
 
 
8446f7a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
db82f0c
8446f7a
 
db82f0c
8446f7a
18f0b90
 
8446f7a
18f0b90
 
8446f7a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# ---------------------------------------------------------------------------
# Load the anime dataset and flatten each row into a single context string
# ("col1: val1 col2: val2 ... ") for embedding into the vector store.
# Note: the trailing space after each value is kept to match the original
# serialization exactly.
# ---------------------------------------------------------------------------
import pandas as pd

df = pd.read_csv('./anime.csv')

MAX_ROWS = 100  # cap how many rows are embedded
NUM_COLS = 7    # serialize the first 7 columns (original comment said 8, but the loop was range(7))

context_data = []
for i in range(min(len(df), MAX_ROWS)):  # loop over at most MAX_ROWS rows
    # f-string performs the str() conversion; join avoids quadratic `+=`.
    # df.iloc[i, j] replaces the deprecated chained indexing df.iloc[i][j].
    context = "".join(
        f"{df.columns[j]}: {df.iloc[i, j]} "
        for j in range(min(NUM_COLS, df.shape[1]))  # guard against narrow CSVs
    )
    context_data.append(context)

import os

# Get the secret key from the environment
# NOTE(review): the variable is named 'Animepedia' rather than a conventional
# name like GROQ_API_KEY; os.environ.get returns None when it is unset, so
# ChatGroq will only surface a missing key as an error later, at call time.
groq_key = os.environ.get('Animepedia')

## LLM used for RAG
from langchain_groq import ChatGroq

# Chat model served by Groq's hosted API.
# NOTE(review): "llama-3.1-70b-versatile" may have been deprecated/retired by
# Groq — verify the model id against Groq's current model list.
llm = ChatGroq(model="llama-3.1-70b-versatile",api_key=groq_key)

## Embedding model
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

# Create the persistent vector store.
from langchain_chroma import Chroma

vectorstore = Chroma(
    collection_name="Anime_dataset_store",
    embedding_function=embed_model,
    # NOTE(review): "./" scatters Chroma's files into the working directory;
    # a dedicated folder such as "./chroma_db" would be tidier.
    persist_directory="./",
)

# Add the row contexts to the store. Stable ids make reruns idempotent
# (Chroma upserts by id) instead of inserting duplicate documents into the
# persistent collection every time this script is executed.
# (A dead inspection statement `vectorstore.get().keys()` — result discarded —
# was removed here.)
vectorstore.add_texts(
    context_data,
    ids=[f"anime-{i}" for i in range(len(context_data))],
)

# Expose the store as a retriever for the RAG chain below.
retriever = vectorstore.as_retriever()

from langchain_core.prompts import PromptTemplate

# Instruction template for the anime RAG chain. The retriever's documents
# fill {context}; the user's query fills {question}.
template = """You are an anime expert.
    Use the provided context to answer the question.
    If you don't know the answer, say so. Explain your answer in detail.
    Do not discuss the context in your response; just provide the answer directly.

    Context: {context}

    Question: {question}

    Answer:"""

# Build the prompt; input variables are inferred from the {placeholders}.
rag_prompt = PromptTemplate.from_template(template)

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# LCEL pipeline. The leading dict fans the incoming question out in parallel:
#   "context"  -> retriever (fetches matching documents from the vector store)
#   "question" -> RunnablePassthrough() (forwards the raw question unchanged)
# The result feeds the prompt, then the LLM, and StrOutputParser reduces the
# model's message object to a plain string.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)

import gradio as gr

# Streaming chat handler for gr.ChatInterface: yields the growing answer so
# the UI can render it incrementally. `history` is required by the Gradio
# callback signature but is not consulted here.
def animepedia_memory_stream(message, history):
    pieces = []
    for chunk in rag_chain.stream(message):
        pieces.append(chunk)
        # Yield the cumulative text so far, not just the new chunk.
        yield "".join(pieces)

# --- Gradio UI configuration -------------------------------------------------

# Sample prompts shown beneath the chat box.
examples = [
    "What is the highest-rated action anime?",
    "Can you recommend an anime with less than 12 episodes?",
    "Tell me about a family-friendly anime.",
]

title = "Animepedia: Your Ultimate Anime Guide"
description = "Real-time Anime Companion to Answer Questions and Provide Recommendations About Your Favorite Shows."

# Chat UI wired to the streaming RAG handler above.
demo = gr.ChatInterface(
    animepedia_memory_stream,
    type="messages",
    title=title,
    description=description,
    examples=examples,
    fill_height=True,
    theme="glass",
)

# Start the local web server only when executed as a script.
if __name__ == "__main__":
    demo.launch()