Improvements to the probability calculation
server.js
CHANGED
@@ -138,7 +138,7 @@ function UpdateProbs(opts){
 
     // Sort into order...
     let SortedModels = ModelList.sort( (a,b) => {
-        let diff = a.stats.parcela - b.stats.parcela;
+        let diff = parseFloat(a.stats.parcela.toPrecision(2)) - parseFloat(b.stats.parcela.toPrecision(2));
 
         if(diff == 0)
             diff = a.stats.parcela*Math.random() - b.stats.parcela*Math.random()
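This hunk rounds each model's parcela to two significant digits before comparing, so near-equal shares now count as ties and fall through to the randomized tiebreak instead of always landing in the same fixed order. A minimal sketch of the effect, with made-up model objects:

    // Made-up models for illustration; only stats.parcela matters here.
    const models = [
        { name: 'a', stats: { parcela: 0.4012 } },
        { name: 'b', stats: { parcela: 0.3987 } }, // also rounds to 0.40 -> ties with 'a'
        { name: 'c', stats: { parcela: 0.15 } },
    ];

    models.sort( (a,b) => {
        let diff = parseFloat(a.stats.parcela.toPrecision(2)) - parseFloat(b.stats.parcela.toPrecision(2));
        if(diff == 0)
            diff = a.stats.parcela*Math.random() - b.stats.parcela*Math.random();
        return diff;
    });

    console.log(models.map(m => m.name)); // 'c' always first; 'a'/'b' order varies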
@@ -194,7 +194,8 @@ async function GetModelAnswer(model, prompt){
     }
 
     console.log("Falando com a IA 🤖", model, ModelConfig.name)
-    MyStats.total
+    MyStats.total++;
+    let StartTime = new Date();
     const response = await fetch(
         InferenceApi,
         {
@@ -203,7 +204,9 @@ async function GetModelAnswer(model, prompt){
         body: JSON.stringify(data),
         }
     );
-
+    let EndTime = new Date();
+    let ElapsedTime = EndTime.getTime() - StartTime.getTime();
+    console.log("Total", ElapsedTime);
 
     if(response.status != 200){
         MyStats.erros++;
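The two hunks above also fix a no-op: the old line MyStats.total read the counter without changing it, while MyStats.total++ actually counts the attempt. The StartTime/EndTime pair around fetch then measures the wall-clock latency of each inference call. A minimal sketch of the pattern, assuming Node 18+ global fetch; timedFetch is a hypothetical wrapper, the diff applies this inline:

    // Hypothetical wrapper around the call-timing pattern the diff adds inline.
    async function timedFetch(url, opts){
        const StartTime = new Date();
        const response = await fetch(url, opts);
        const ElapsedTime = new Date().getTime() - StartTime.getTime();
        console.log("Total", ElapsedTime);
        return { response, ElapsedTime };
    }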
@@ -225,7 +228,18 @@ async function GetModelAnswer(model, prompt){
         console.log("Tentando com o ",model);
         continue;
     }
-
+
+    // Additional error checks!
+    // Response time longer than 2s?
+    // Penalize
+    if(ElapsedTime >= 2500)
+        MyStats.erros += 0.100;
+
+    if(ElapsedTime < 900){
+        MyStats.erros -= 0.100;
+        if(MyStats.erros < 0) MyStats.erros = 0;
+    }
+
     console.log("Ok, lendo o json...", response.status);
     const result = await response.json();
     LastWorkedModel = model;
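With this hunk, erros stops being a plain error counter and becomes a fractional score: an answer slower than 2500 ms (despite the 2s wording in the comment) costs 0.1, and one faster than 900 ms earns 0.1 back, floored at zero. The same logic pulled into a standalone function for illustration; the diff applies it inline:

    // stats stands in for a model's MyStats object; thresholds and weights are the diff's own.
    function applyLatencyScore(stats, elapsedMs){
        if(elapsedMs >= 2500)
            stats.erros += 0.100;                 // slow answer: fractional penalty
        if(elapsedMs < 900){
            stats.erros -= 0.100;                 // fast answer: small reward...
            if(stats.erros < 0) stats.erros = 0;  // ...but never below zero
        }
        return stats;
    }

    console.log(applyLatencyScore({ erros: 1 }, 3000)); // { erros: 1.1 }
    console.log(applyLatencyScore({ erros: 0 }, 500));  // { erros: 0 }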
@@ -352,22 +366,31 @@ app.get('/error', async (req, res) => {
     let EndTime = new Date();
     let TotalMs = EndTime.getTime() - StartTime.getTime();
 
+    let ModelInfo = MODELS[result.model]
+
     console.log("Respondido", TotalMs, result);
 
     let resp = result.result;
     if(!resp || !Array.isArray(resp)){
         res.json({text:":("});
+        ModelInfo.stats.erros += 0.2;
         return;
     }
 
     let gentext = resp[0].generated_text
 
     let textParts = gentext.split('|fim|');
+
+    if(textParts.length < 2){
+        ModelInfo.stats.erros += 0.1;
+    }
+
     let txtFinal = textParts[0].trim();
 
     let estimatedChars = max*8;
     if(txtFinal.length >= estimatedChars){
         txtFinal = txtFinal.slice(0,estimatedChars);
+        ModelInfo.stats.erros += 0.05;
     }
 
     console.log("FullResp:"+gentext);
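The last hunk grades the response body the same way: a result that is not an array costs the answering model 0.2, a missing |fim| stop marker costs 0.1, and text overrunning the max*8 character estimate costs 0.05. A sketch of that grading step, where gradeOutput, stats, and maxTokens are hypothetical stand-ins for the inline code, ModelInfo.stats, and the route's max:

    // Weights are the ones in the diff; everything else is illustrative.
    function gradeOutput(stats, gentext, maxTokens){
        const textParts = gentext.split('|fim|');
        if(textParts.length < 2)
            stats.erros += 0.1;                   // model never emitted the stop marker
        let txtFinal = textParts[0].trim();
        const estimatedChars = maxTokens*8;
        if(txtFinal.length >= estimatedChars){
            txtFinal = txtFinal.slice(0, estimatedChars);
            stats.erros += 0.05;                  // ran past the expected length
        }
        return { stats, txtFinal };
    }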