johannoriel committed
Commit: d8bdf2c
Parent: bf628d9

Update app.py

Files changed (1): app.py (+15, -31)
app.py CHANGED
@@ -2,15 +2,10 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer, AutoModel
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_community.vectorstores import FAISS
 import fitz  # PyMuPDF
-import os
-import hashlib
 
-# Directory to store cached files
-CACHE_DIR = "pdf_cache"
-os.makedirs(CACHE_DIR, exist_ok=True)
 
 def get_hf_models():
     return ["Qwen/Qwen2.5-3B-Instruct", "HuggingFaceH4/zephyr-7b-beta", "mistralai/Mistral-7B-Instruct-v0.1"]
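Both removed import lines above bound the same `HuggingFaceEmbeddings` name (the second shadowing the first), and both paths are deprecated: the class now lives in the `langchain_huggingface` partner package, which is what this commit switches to. A minimal sketch of the new import in use, taking the app's default embedder as the model (the query string is only illustrative):

from langchain_huggingface import HuggingFaceEmbeddings

# Load the app's default embedder and embed one query string.
embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector = embedder.embed_query("What is retrieval-augmented generation?")
print(len(vector))  # all-MiniLM-L6-v2 yields 384-dimensional vectors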
@@ -42,46 +37,35 @@ def no_rag(query, client):
     response = client.text_generation(query, max_new_tokens=512)
     return response
 
-def cache_file(file):
-    if file is None:
-        return None
-    file_hash = hashlib.md5(file.read()).hexdigest()
-    cached_path = os.path.join(CACHE_DIR, f"{file_hash}.pdf")
-    if not os.path.exists(cached_path):
-        with open(cached_path, "wb") as f:
-            file.seek(0)
-            f.write(file.read())
-    return cached_path
-
-def get_cached_files():
-    return [f for f in os.listdir(CACHE_DIR) if f.endswith('.pdf')]
-
-def process_query(query, pdf_file, cached_file, llm_choice, embedder_choice):
+def process_query(query, pdf_path, llm_choice, embedder_choice, use_manual_rag):
     client = InferenceClient(llm_choice)
     no_rag_response = no_rag(query, client)
 
-    if pdf_file is not None:
-        pdf_path = cache_file(pdf_file)
-    elif cached_file:
-        pdf_path = os.path.join(CACHE_DIR, cached_file)
-    else:
+    if pdf_path is None:
         return no_rag_response, "RAG non utilisé (pas de fichier PDF)", "RAG non utilisé (pas de fichier PDF)", "Pas de fichier PDF fourni", "Pas de contexte extrait"
 
     full_text = extract_text_from_pdf(pdf_path)
-    manual_rag_response = manual_rag(query, full_text, client)
+
+    # RAG manuel seulement si choisi
+    if use_manual_rag == "Oui":
+        manual_rag_response = manual_rag(query, full_text, client)
+    else:
+        manual_rag_response = "RAG manuel non utilisé"
+
     classic_rag_response, classic_rag_context = classic_rag(query, pdf_path, client, embedder_choice)
 
     return no_rag_response, manual_rag_response, classic_rag_response, full_text, classic_rag_context
 
+
 iface = gr.Interface(
     fn=process_query,
     inputs=[
         gr.Textbox(label="Votre question"),
         gr.File(label="Chargez un nouveau PDF"),
-        gr.Dropdown(choices=get_cached_files, label="Ou choisissez un PDF déjà téléversé", interactive=True),
-        gr.Dropdown(choices=get_hf_models(), label="Choisissez le LLM", value="Qwen/Qwen2.5-3B-Instruct"),
+        gr.Dropdown(choices=get_hf_models(), label="Choisissez le LLM", value="HuggingFaceH4/zephyr-7b-beta"),
         gr.Dropdown(choices=["sentence-transformers/all-MiniLM-L6-v2", "nomic-ai/nomic-embed-text-v1.5"],
-                    label="Choisissez l'Embedder", value="sentence-transformers/all-MiniLM-L6-v2")
+                    label="Choisissez l'Embedder", value="sentence-transformers/all-MiniLM-L6-v2"),
+        gr.Dropdown(choices=["Oui", "Non"], label="Utiliser RAG manuel ?", value="Non")  # Ajout de la combobox pour choisir RAG manuel
     ],
     outputs=[
         gr.Textbox(label="Réponse sans RAG"),
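The newly imported `FAISS` is presumably what `classic_rag` builds its vector store with; that function's body falls outside both hunks, so the sketch below is an assumption about its shape, not the app's actual code (the function name, chunk sizes, `k`, and prompt format are all illustrative):

import fitz  # PyMuPDF
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

def classic_rag_sketch(query, pdf_path, client, embedder_choice):
    # Extract the PDF text, chunk it, index the chunks in an in-memory
    # FAISS store, and retrieve the closest chunks as generation context.
    full_text = "".join(page.get_text() for page in fitz.open(pdf_path))
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    chunks = splitter.split_text(full_text)
    store = FAISS.from_texts(chunks, HuggingFaceEmbeddings(model_name=embedder_choice))
    docs = store.similarity_search(query, k=3)
    context = "\n\n".join(doc.page_content for doc in docs)
    prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
    return client.text_generation(prompt, max_new_tokens=512), context

Rebuilding the index on every call fits the commit's direction: with the old `pdf_cache` layer removed, nothing persists between queries.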
 
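One wiring detail the truncated interface block implies: `gr.Interface` passes `inputs` to `fn` positionally, so the five components must line up with `process_query(query, pdf_path, llm_choice, embedder_choice, use_manual_rag)`. With `gr.File` in its default filepath mode, the second argument arrives as a path string, or `None` when nothing is uploaded, which is the case the new `if pdf_path is None` guard handles and what made the old `cache_file`/`get_cached_files` plumbing unnecessary. A hypothetical call tracing that mapping (the file path is a placeholder):

# Arguments arrive in the same order as the components in `inputs`.
no_rag_out, manual_out, classic_out, full_text, context = process_query(
    "What does the document conclude?",        # gr.Textbox
    "example.pdf",                             # gr.File (placeholder path)
    "HuggingFaceH4/zephyr-7b-beta",            # LLM dropdown
    "sentence-transformers/all-MiniLM-L6-v2",  # embedder dropdown
    "Non",                                     # manual-RAG dropdown
)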