Kid Omar Costelo committed on
Commit
091f9c5
·
1 Parent(s): 28c2d6f

Refactor app.py to add FastAPI endpoints for scoring essays and generating prompts

Browse files
Files changed (1) hide show
  1. app.py +71 -0
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from pydantic import BaseModel
3
+ from transformers import pipeline
4
+
5
class Message(BaseModel):
    """Request body for the /score endpoint."""

    # The student essay text to be scored.
    essay: str
    # The writing instruction the essay responds to.
    instruction: str
8
+
9
# Getting the prompt from the prompt.txt file.
# Loaded once at import time; generate_prompt() prepends it to every request.
prompt_dir = "prompt.txt"
# Explicit encoding avoids platform-dependent default-codec surprises;
# the redundant `prompt = ''` pre-initialization is dropped (open() would
# raise before it was ever read).
with open(prompt_dir, 'r', encoding="utf-8") as file:
    prompt = file.read()
14
+
15
+
16
def post_process(essay):
    """Trim *essay* so it ends with the first 'Feedback:' line.

    If 'Feedback:' never appears, or no newline follows it, the essay is
    returned unchanged.
    """
    marker = essay.find("Feedback:")
    if marker == -1:
        # No feedback section at all — nothing to trim.
        return essay

    cut = essay.find("\n", marker)
    # Keep everything up to (but excluding) the newline that ends the
    # feedback line; without a trailing newline there is nothing to cut.
    return essay if cut == -1 else essay[:cut]
33
+
34
def pre_process(instruction, essay):
    """Combine *instruction* and *essay* into the model's expected layout."""
    return f"Instruction:{instruction}\nEssay:{essay}"
37
+
38
def generate_prompt(input):
    """Prepend the shared grading template to *input*.

    NOTE: relies on the module-level `prompt` loaded from prompt.txt.
    """
    return f"{prompt}\n{input}"
41
+
42
# Initialize your pipeline outside of your route functions, so the model is
# loaded once at startup rather than per request.
# NOTE(review): device_map="auto" delegates device placement (GPU/CPU) to the
# underlying loader — presumably accelerate handles this; confirm deployment
# target has compatible hardware.
pipe = pipeline(
    "text-generation",
    model="gildead/mistral-aes-414",
    device_map="auto"
)

# Initialize your FastAPI application
app = FastAPI()
51
+
52
# Define your route functions
@app.get("/")
async def root():
    """Health-check endpoint confirming the API is up."""
    payload = {"message": "Mistral API is running."}
    return payload
56
+
57
@app.post("/score")
async def overall(message: Message):
    """Score an essay.

    Builds the model prompt from the request's instruction and essay, runs
    the text-generation pipeline, and returns the post-processed output
    (trimmed to the first 'Feedback:' line) as {"result": ...}.
    """
    text = pre_process(message.instruction, message.essay)
    # Renamed from `prompt`: the original local shadowed the module-level
    # `prompt` template that generate_prompt() reads, which invites bugs.
    full_prompt = generate_prompt(text)

    result = pipe(
        # Mistral-instruct chat format: wrap the prompt in [INST] ... [/INST].
        f"<s>[INST] {full_prompt} [/INST]",
        max_new_tokens=200,
        num_return_sequences=1,
    )

    # The pipeline echoes the prompt; keep only the text after [/INST].
    generated_text = result[0]['generated_text']
    output = generated_text.split('[/INST]', 1)[-1].strip()
    final_output = post_process(output)

    return {"result": final_output}