Spaces:
Sleeping
Sleeping
lukestanley
committed on
Commit
·
327982a
1
Parent(s):
0519e07
Slight refactor tidying
Browse files
chill.py
CHANGED
@@ -1,3 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
# Download the model: https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf?download=true
|
2 |
# Rename it as needed.
|
3 |
# Install the server and start it:
|
@@ -10,69 +17,13 @@
|
|
10 |
original_text = """Stop chasing dreams instead. Life is not a Hollywood movie. Not everyone is going to get a famous billionaire. Adjust your expectations to reality, and stop thinking so highly of yourself, stop judging others. Assume the responsibility for the things that happen in your life. It is kind of annoying to read your text, it is always some external thing that "happened" to you, and it is always other people who are not up to your standards. At some moment you even declare with despair. And guess what? This is true and false at the same time, in a fundamental level most people are not remarkable, and you probably aren't too. But at the same time, nobody is the same, you have worth just by being, and other people have too. The impression I get is that you must be someone incredibly annoying to work with, and that your performance is not even nearly close to what you think it is, and that you really need to come down to earth. Stop looking outside, work on yourself instead. You'll never be satisfied just by changing jobs. Do therapy if you wish, become acquainted with stoicism, be a volunteer in some poor country, whatever, but do something to regain control of your life, to get some perspective, and to adjust your expectations to reality."""
|
11 |
# From elzbardico on https://news.ycombinator.com/item?id=36119858
|
12 |
|
13 |
-
|
14 |
-
|
15 |
-
from typing import Dict, Any, Union
|
16 |
-
from pydantic import BaseModel, Field
|
17 |
-
import requests
|
18 |
-
from llama_cpp import json_schema_to_gbnf # Only used directly to convert the JSON schema to GBNF,
|
19 |
-
# The main interface is the HTTP server, not the library directly.
|
20 |
-
|
21 |
"""
|
22 |
import pysbd
|
23 |
sentences = pysbd.Segmenter(language="en", clean=False).segment(paragraph)
|
24 |
"""
|
25 |
|
26 |
-
def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False) -> Union[str, Dict[str, Any]]:
    """Query the local llama.cpp chat server with *prompt*, constraining the
    reply to the JSON schema of *pydantic_model_class* via a GBNF grammar.

    Tokens are echoed to stdout as they stream in and accumulated; the full
    text is then parsed. Returns the parsed JSON dict, or a validated model
    instance when *return_pydantic_object* is True.
    """
    model_schema = pydantic_model_class.model_json_schema()
    # Drop the illustrative "example" entry so it does not leak into the grammar.
    if "example" in model_schema:
        del model_schema["example"]
    grammar = json_schema_to_gbnf(json.dumps(model_schema))

    request_body = {
        "stream": True,
        "max_tokens": 1000,
        "grammar": grammar,
        "temperature": 1.0,
        "messages": [{"role": "user", "content": prompt}],
    }
    response = requests.post(
        "http://localhost:5834/v1/chat/completions",
        headers={"Content-Type": "application/json"},
        json=request_body,
        stream=True,
    )

    collected = ""
    for raw_line in response.iter_lines():
        # Skip SSE keep-alive blanks and any non-data lines.
        if not raw_line:
            continue
        decoded = raw_line.decode("utf-8")
        if not decoded.startswith("data: "):
            continue
        data_part = decoded.split("data: ")[1]
        if data_part.strip() == "[DONE]":
            break
        event = json.loads(data_part)
        token = event.get('choices')[0].get('delta').get('content')
        if token:
            collected = collected + token
            print(token, sep='', end='', flush=True)

    if return_pydantic_object:
        return pydantic_model_class.model_validate_json(collected)
    else:
        return json.loads(collected)
|
75 |
-
|
76 |
global suggestions
|
77 |
suggestions = []
|
78 |
|
@@ -184,21 +135,6 @@ Please score the text.
|
|
184 |
"""
|
185 |
|
186 |
|
187 |
-
def replace_text(template: str, replacements: dict) -> str:
    """Substitute every ``{key}`` marker in *template* with its value
    from *replacements* and return the resulting string."""
    for key in replacements:
        template = template.replace("{" + key + "}", replacements[key])
    return template
|
191 |
-
|
192 |
-
def query_ai_prompt(prompt, replacements, model_class):
    """Render *prompt* by filling its placeholders from *replacements*,
    then run it through the streaming LLM constrained to *model_class*."""
    rendered = replace_text(prompt, replacements)
    return llm_streaming(rendered, model_class)
|
197 |
-
|
198 |
-
|
199 |
-
def generate_gbnf_grammar(models):
    """Build a GBNF grammar from the JSON schema of the first model class.

    Only ``models[0]`` is inspected — presumably all entries share one
    grammar; TODO confirm with callers.

    Args:
        models: Non-empty sequence of pydantic model classes.

    Returns:
        The GBNF grammar string produced by ``json_schema_to_gbnf``.
    """
    model = models[0]
    # Use the pydantic v2 API (model_json_schema), consistent with the rest
    # of this file; .schema() is the deprecated v1 alias.
    return json_schema_to_gbnf(json.dumps(model.model_json_schema()))
|
202 |
|
203 |
def improve_text():
|
204 |
global suggestions
|
|
|
1 |
+
import json
|
2 |
+
import time
|
3 |
+
from pydantic import BaseModel, Field
|
4 |
+
from utils import query_ai_prompt
|
5 |
+
|
6 |
+
# This script uses the llama_cpp server to improve a text.
|
7 |
+
# To run this script, you need to do something like this:
|
8 |
# Download the model: https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf?download=true
|
9 |
# Rename it as needed.
|
10 |
# Install the server and start it:
|
|
|
17 |
original_text = """Stop chasing dreams instead. Life is not a Hollywood movie. Not everyone is going to get a famous billionaire. Adjust your expectations to reality, and stop thinking so highly of yourself, stop judging others. Assume the responsibility for the things that happen in your life. It is kind of annoying to read your text, it is always some external thing that "happened" to you, and it is always other people who are not up to your standards. At some moment you even declare with despair. And guess what? This is true and false at the same time, in a fundamental level most people are not remarkable, and you probably aren't too. But at the same time, nobody is the same, you have worth just by being, and other people have too. The impression I get is that you must be someone incredibly annoying to work with, and that your performance is not even nearly close to what you think it is, and that you really need to come down to earth. Stop looking outside, work on yourself instead. You'll never be satisfied just by changing jobs. Do therapy if you wish, become acquainted with stoicism, be a volunteer in some poor country, whatever, but do something to regain control of your life, to get some perspective, and to adjust your expectations to reality."""
|
18 |
# From elzbardico on https://news.ycombinator.com/item?id=36119858
|
19 |
|
20 |
+
# TODO: See README.md for the more plans.
|
21 |
+
# TODO: Segment the text into sentences
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
"""
|
23 |
import pysbd
|
24 |
sentences = pysbd.Segmenter(language="en", clean=False).segment(paragraph)
|
25 |
"""
|
26 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
global suggestions
|
28 |
suggestions = []
|
29 |
|
|
|
135 |
"""
|
136 |
|
137 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
138 |
|
139 |
def improve_text():
|
140 |
global suggestions
|
utils.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from typing import Any, Dict, Union
|
3 |
+
import requests
|
4 |
+
|
5 |
+
from llama_cpp import json_schema_to_gbnf # Only used directly to convert the JSON schema to GBNF,
|
6 |
+
# The main interface is the HTTP server, not the library directly.
|
7 |
+
|
8 |
+
|
9 |
+
def llm_streaming(prompt:str, pydantic_model_class, return_pydantic_object=False) -> Union[str, Dict[str, Any]]:
    """Stream a grammar-constrained completion from the local llama.cpp server.

    Builds a GBNF grammar from the pydantic model's JSON schema so the server
    can only emit JSON matching that schema, streams the reply token by token
    (echoing progress to stdout), then parses the accumulated text.

    Args:
        prompt: The user message sent to the model.
        pydantic_model_class: Pydantic model class whose JSON schema
            constrains the output.
        return_pydantic_object: When True, return a validated instance of
            ``pydantic_model_class``; otherwise return the parsed JSON dict.

    Returns:
        A dict parsed from the model output, or a pydantic model instance.

    Raises:
        requests.HTTPError: If the server responds with an error status.
    """
    schema = pydantic_model_class.model_json_schema()
    # "example" is illustrative metadata, not part of the structural schema
    # the grammar should enforce.
    schema.pop("example", None)
    json_schema = json.dumps(schema)
    grammar = json_schema_to_gbnf(json_schema)

    payload = {
        "stream": True,
        "max_tokens": 1000,
        "grammar": grammar,
        "temperature": 1.0,
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
    }
    headers = {
        "Content-Type": "application/json",
    }

    response = requests.post(
        "http://localhost:5834/v1/chat/completions",
        headers=headers, json=payload, stream=True,
    )
    # Fail fast on HTTP errors instead of trying to parse an error body
    # as an SSE stream.
    response.raise_for_status()

    output_text = ""
    for line in response.iter_lines():
        if not line:
            continue  # SSE keep-alive blank line
        line = line.decode("utf-8")
        if not line.startswith("data: "):
            continue
        # Strip only the leading SSE prefix; split("data: ")[1] would corrupt
        # any payload that itself contains the substring "data: ".
        data = line[len("data: "):]
        if data.strip() == "[DONE]":
            break
        event = json.loads(data)
        new_token = event.get('choices')[0].get('delta').get('content')
        if new_token:
            output_text += new_token
            print(new_token, sep='', end='', flush=True)

    if return_pydantic_object:
        return pydantic_model_class.model_validate_json(output_text)
    return json.loads(output_text)
58 |
+
|
59 |
+
def replace_text(template: str, replacements: dict) -> str:
    """Fill each ``{key}`` placeholder in *template* with the matching
    value from *replacements*."""
    result = template
    for placeholder, substitution in replacements.items():
        result = result.replace(f"{{{placeholder}}}", substitution)
    return result
|
63 |
+
|
64 |
+
def query_ai_prompt(prompt, replacements, model_class):
    """Expand the placeholders in *prompt* using *replacements* and send the
    result to the streaming LLM, constrained by *model_class*'s schema."""
    filled_prompt = replace_text(prompt, replacements)
    return llm_streaming(filled_prompt, model_class)
|
69 |
+
|