Update app.py
app.py CHANGED
@@ -1,17 +1,25 @@
 from flask import Flask, request
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import RobertaConfig
 from transformers import RobertaForSequenceClassification, RobertaTokenizer, RobertaConfig
 import torch
+from torch import cuda
 import gradio as gr
 import os
 import re
 app = Flask(__name__)
 
 ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
-config = RobertaConfig.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN)
-model = RobertaForSequenceClassification.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN, config = config)
+# config = RobertaaConfig.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN)
+# model = RobertaForSequenceClassification.from_pretrained("PirateXX/ChatGPT-Text-Detector", use_auth_token= ACCESS_TOKEN, config = config)
 
-
-tokenizer =
+device = 'cuda' if cuda.is_available() else 'cpu'
+tokenizer = AutoTokenizer.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token= "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")
+model = AutoModelForSequenceClassification.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token= "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")
+model.to(device)
+
+# model_name = "roberta-base"
+# tokenizer = RobertaTokenizer.from_pretrained(model_name, map_location=torch.device('cpu'))
 
 def text_to_sentences(text):
     clean_text = text.replace('\n', ' ')
@@ -34,7 +42,7 @@ def chunks_of_900(text, chunk_size=900):
     chunks.append(current_chunk)
     return chunks
 
-def predict(query
+def predict(query):
     tokens = tokenizer.encode(query)
     all_tokens = len(tokens)
     tokens = tokens[:tokenizer.model_max_length - 2]
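
For reference, the commit switches loading to the Auto classes and the "PirateXX/AI-Content-Detector" checkpoint, adds device selection, and fixes the predict signature, but it reads ACCESS_TOKEN from the environment and then passes a hardcoded token to from_pretrained. Below is a minimal standalone sketch of the same loading and a single inference call, using the environment variable instead; the detect_ai_probability helper and the label-index interpretation are illustrative assumptions, not the Space's actual predict implementation.

# Minimal sketch (assumptions noted): load the detector and score one text.
# Assumes ACCESS_TOKEN is set in the environment rather than hardcoded,
# and that the meaning of each label index is taken from model.config.id2label.
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token=ACCESS_TOKEN)
model = AutoModelForSequenceClassification.from_pretrained("PirateXX/AI-Content-Detector", use_auth_token=ACCESS_TOKEN)
model.to(device)
model.eval()

def detect_ai_probability(text):
    # Hypothetical helper mirroring the diff's predict(): tokenize with
    # truncation to the model's maximum input length, then run one forward pass.
    inputs = tokenizer(text, return_tensors="pt", truncation=True,
                       max_length=tokenizer.model_max_length).to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=-1).squeeze()
    # Which index corresponds to "AI-generated" depends on the checkpoint's
    # label mapping; inspect model.config.id2label before relying on it.
    return probs.tolist()

print(detect_ai_probability("Sample text to score."))

Keeping the token in ACCESS_TOKEN (a Space secret) avoids committing a credential to the repository while leaving the rest of the loading path identical to the updated app.py.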