Yoxas committed on
Commit 22707c7 · verified · 1 Parent(s): 24d3092

Update app.py

Files changed (1)
  1. app.py +8 -4
app.py CHANGED
@@ -14,13 +14,17 @@ model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever
 inputs = tokenizer(contexts, return_tensors='pt', padding=True, truncation=True)
 labels = tokenizer(responses, return_tensors='pt', padding=True, truncation=True)
 
+# Extract the abstracts
+abstracts = df['Abstract'].dropna().tolist()
+
 # Load your dataset
 df = pd.read_csv('10kstats.csv')
 
-# Ensure the dataset has the required columns for RAG
-# For example, it should have 'context' and 'response' columns
-contexts = df['Abstract'].tolist()
-#responses = df['response'].tolist()
+# Generate context-response pairs (abstract-question pairs)
+# Here we use the abstracts as contexts and simulate questions
+
+contexts = abstracts
+responses = ["Can you tell me more about this research?" for _ in abstracts]
 
 @spaces.GPU
 def generate_response(input_text):
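
For reference, a minimal sketch of how the updated data-preparation block might read once consolidated in app.py. The tokenizer load shown here is an assumption (the diff only shows the RagTokenForGeneration load in the hunk header), and the dataset read is placed before the abstract extraction, since the committed hunk references df above the pd.read_csv line:

import pandas as pd
from transformers import RagTokenizer

# Assumed: the tokenizer is loaded earlier in app.py alongside the RAG model
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")

# Load the dataset of abstracts
df = pd.read_csv('10kstats.csv')

# Extract the abstracts, dropping rows with missing values
abstracts = df['Abstract'].dropna().tolist()

# Use the abstracts as contexts and pair each with a simulated question
contexts = abstracts
responses = ["Can you tell me more about this research?" for _ in abstracts]

# Tokenize contexts and responses for the RAG model
inputs = tokenizer(contexts, return_tensors='pt', padding=True, truncation=True)
labels = tokenizer(responses, return_tensors='pt', padding=True, truncation=True)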