Update app.py
app.py
CHANGED
@@ -7,6 +7,7 @@ import re
 import time
 import random
 import os
+from huggingface_hub import InferenceClient
 
 def setup_session():
     try:
@@ -96,6 +97,18 @@ def clean_text(text):
     text = re.sub(r'\s+', ' ', text).strip()
     return text
 
+def create_client(model_name):
+    return InferenceClient(model_name, token=os.getenv("HF_TOKEN"))
+
+client = create_client("OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")
+
+def call_api(content, system_message, max_tokens, temperature, top_p):
+    messages = [{"role": "system", "content": system_message}, {"role": "user", "content": content}]
+    random_seed = random.randint(0, 1000000)
+    response = client.chat_completion(messages=messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p, seed=random_seed)
+    modified_text = response.choices[0].message.content
+    return modified_text
+
 def analyze_info(category, topic, references1, references2, references3):
     return f"선택한 카테고리: {category}\n블로그 주제: {topic}\n참고 글1: {references1}\n참고 글2: {references2}\n참고 글3: {references3}"
 
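For reference, a minimal standalone sketch of the inference call this commit wires up. It uses the model name from the diff and requires HF_TOKEN to be set in the environment; the system message, user prompt, and generation parameters below are placeholders for illustration, not values from app.py (in the app, the user content is built from the Gradio inputs, e.g. the selected category and blog topic that analyze_info formats, and call_api draws a fresh random seed per request instead of a fixed one).

import os
from huggingface_hub import InferenceClient

# Same model and auth pattern as create_client() in this commit;
# requires HF_TOKEN in the environment.
client = InferenceClient(
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    token=os.getenv("HF_TOKEN"),
)

# Placeholder prompt; app.py assembles the real content from the Gradio form.
response = client.chat_completion(
    messages=[
        {"role": "system", "content": "You write friendly Korean blog posts."},
        {"role": "user", "content": "Blog topic: a weekend food tour of Jeju."},
    ],
    max_tokens=512,
    temperature=0.7,
    top_p=0.9,
    seed=42,  # call_api uses random.randint(0, 1000000) here to vary outputs
)
print(response.choices[0].message.content)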