import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# CodeGen2 tokenizer paired with a 1B-parameter checkpoint fine-tuned on TAT-QA.
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen2-1B")
model = AutoModelForCausalLM.from_pretrained("sarah111/codegen_1b_tatqa", trust_remote_code=True)
model.to(device)

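# For reference, the prompt assembled by tableQA below follows this layout
# (the table rows here are made-up sample values, not from the training data):
#
#   Table :
#   ,2019,2018
#   Revenue,1200,1000
#
#   Question : What is the change in revenue between 2018 and 2019?
#
#   Program :
#
# The model is expected to continue with a Python snippet ending in an
# `answer=` expression, which tableQA extracts and evaluates.
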
def tableQA(Question, table):
    answer = 0
    program = ""
    try:
        # Serialise the input dataframe to CSV so it can be embedded in the prompt.
        table = table.to_csv(header=True, index=False).strip('\n')

        entree = 'Table :\n{0}\n\nQuestion : {1}\n\nProgram :'.format(table, Question)
        print(entree)

        input_ids = tokenizer(entree, return_tensors="pt").input_ids.to(device)
        gen_tokens = model.generate(input_ids, do_sample=True, temperature=0.9, max_length=400)
        output = tokenizer.batch_decode(gen_tokens)[0]

        # The fine-tuned model is expected to complete the prompt with a fenced
        # Python block that ends in an `answer=` expression; keep only that expression.
        program = output.split("Program :````Python\n", 1)[1].split("<|endoftext|>", 1)[0].split("answer=")[1]
        print(program)

        # Drop thousands separators, then evaluate the generated arithmetic
        # expression. Note: eval() runs model-generated code, which is
        # acceptable for a demo but risky in production.
        program = program.replace(",", "")
        answer = eval(program)
        print(answer)

    except Exception as e:
        # Fail soft: keep the default outputs and report the error
        # instead of crashing the UI.
        print('exception:', e)

    return program, answer

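# `ex` is referenced by gr.Interface below but was never defined in the
# original snippet; this is a minimal placeholder in the same shape as the
# inputs (question string + dataframe rows), with made-up values to replace.
ex = [
    ["What is the change in revenue between 2018 and 2019?",
     [["Revenue", "1,200", "1,000"]]],
]
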
demo = gr.Interface(
    fn=tableQA,
    inputs=[
        "text",
        gr.Dataframe(
            headers=["", "2019", "2018"],
            datatype=["str", "str", "str"],
            label="Table",
        ),
    ],
    outputs=[gr.Textbox(label="Derivation"), gr.Textbox(label="Result")],
    title="Support tool for finance professionals",
    description="This prototype demonstrates a real-world scenario in which the question-answering system lets finance professionals ask questions that require arithmetic reasoning over a table taken from a financial report.",
    examples=ex,
)

demo.launch(share=True)