Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -58,20 +58,38 @@ def predict(query):
     fake, real = probs.detach().cpu().flatten().numpy().tolist()
     return real
 
-def findRealProb(
-
+def findRealProb(data):
+    if data is None or len(data) == 0:
+        return jsonify({'error': 'No query provided'}), 400
+    if len(data) > 9400:
+        return jsonify({'error': 'Cannot analyze more than 9400 characters!'}), 400
+    if count_words(data) > 1500:
+        return jsonify({'error': 'Cannot analyze more than 1500 words'}), 400
+
+    # return {"Real": predict(data)}
+    chunksOfText = (chunks_of_900(data))
     results = []
     for chunk in chunksOfText:
-
-
-
+        outputv1 = predict(chunk, model, tokenizer)
+        # outputv2 = predict(chunk, modelv2, tokenizerv2)
+        label = "CG"
+        if(outputv1>=0.5):
+            label = "OR"
+        results.append({"Text":chunk, "Label": label, "Confidence":(outputv1)})
     ans = 0
     cnt = 0
-    for
+    for result in results:
+        length = len(result["Text"])
+        confidence = result["Confidence"]
         cnt += length
-    ans = ans +
+        ans = ans + (confidence)*(length)
     realProb = ans/cnt
-
+    label = "AI"
+    if realProb > 0.7:
+        label = "Human"
+    elif realProb > 0.3 and realProb < 0.7:
+        label = "Might be AI"
+    return jsonify({"Real": realProb, "Fake": 1-realProb, "Label": label, "Chunks": results})
 
 demo = gr.Interface(
     fn=findRealProb,
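The new findRealProb relies on two helpers, count_words and chunks_of_900, whose definitions sit outside this hunk. A minimal sketch of what they could look like, assuming chunks_of_900 simply splits the input into consecutive pieces of at most 900 characters (both bodies below are assumptions, not the Space's actual code):

# Hypothetical implementations -- the real app.py defines these elsewhere.
def count_words(text):
    # Count whitespace-separated tokens.
    return len(text.split())

def chunks_of_900(text, size=900):
    # Split the input into consecutive pieces of at most `size` characters.
    return [text[i:i + size] for i in range(0, len(text), size)]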
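The loop over results computes a length-weighted average of the per-chunk confidences, so longer chunks pull realProb more strongly than short ones. A self-contained toy example of the same arithmetic:

# Toy example of the length-weighted average behind realProb.
results = [
    {"Text": "a" * 900, "Confidence": 0.9},  # long chunk, high "real" score
    {"Text": "b" * 100, "Confidence": 0.2},  # short chunk, low "real" score
]
ans = 0
cnt = 0
for result in results:
    length = len(result["Text"])
    cnt += length
    ans += result["Confidence"] * length
realProb = ans / cnt  # (0.9*900 + 0.2*100) / 1000 = 0.83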
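The hunk is cut off inside the gr.Interface(...) call, so the remaining arguments are not visible here. Below is a hypothetical completion, assuming a single text input and a JSON output; the inputs, outputs, and title values are placeholders, not the Space's real configuration. Note also that findRealProb returns Flask-style jsonify(...) responses; outside a Flask application context jsonify raises a RuntimeError, so a Gradio fn would normally return a plain dict instead.

# Hypothetical completion -- the actual arguments live outside this hunk.
demo = gr.Interface(
    fn=findRealProb,
    inputs=gr.Textbox(lines=10, label="Text to analyze"),
    outputs=gr.JSON(label="Prediction"),
    title="AI-generated text detector",
)

if __name__ == "__main__":
    demo.launch()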