run480 committed on
Commit
530832a
·
verified ·
1 Parent(s): 0759a00

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -50
app.py CHANGED
@@ -395,57 +395,80 @@
395
  #-----------------------------------------------------------------------------------
396
  # 17. Chatbot/Dialog Bot: a simple bot named Alicia that is based on the Microsoft DialoGPT model .
397
 
398
- from transformers import AutoModelForCausalLM, AutoTokenizer,BlenderbotForConditionalGeneration
399
- import torch
400
-
401
- chat_tkn = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
402
- mdl = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
403
-
404
- #chat_tkn = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
405
- #mdl = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
406
-
407
def converse(user_input, chat_history=None):
    """Generate a DialoGPT chat reply and render the whole dialog as HTML.

    Parameters:
        user_input: the user's latest message (str).
        chat_history: flat list of token ids for the conversation so far;
            None/empty starts a new conversation. This doubles as the
            gradio "state" value threaded between calls.

    Returns:
        (html, chat_history): the rendered transcript and the updated
        token-id history to store back into the gradio state.
    """
    # Avoid the shared-mutable-default pitfall: build a fresh list per call.
    if chat_history is None:
        chat_history = []
    # Append the user's message, terminated by EOS, to the running history.
    user_input_ids = chat_tkn(user_input + chat_tkn.eos_token, return_tensors="pt").input_ids
    bot_input_ids = torch.cat([torch.LongTensor(chat_history), user_input_ids], dim=-1)
    # generate() returns the whole conversation (prompt + reply), not just the reply.
    chat_history = mdl.generate(
        bot_input_ids, max_length=1000, pad_token_id=chat_tkn.eos_token_id
    ).tolist()
    # The decoded history alternates user / bot turns separated by EOS markers.
    turns = chat_tkn.decode(chat_history[0]).split("<|endoftext|>")
    html = "<div class='mybot'>"
    for idx, mesg in enumerate(turns):
        if not mesg:
            # Skip the empty fragment after the trailing EOS token.
            continue
        if idx % 2 != 0:
            mesg = "Alicia:" + mesg
            clazz = "alicia"
        else:
            clazz = "user"
        html += "<div class='mesg {}'> {}</div>".format(clazz, mesg)
    html += "</div>"
    return html, chat_history
434
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
435
import gradio as grad

# Styles for the chat transcript. NOTE: the container selector is ".mybot"
# to match the class emitted by converse() (the original ".mychat" never
# matched any element), and the alicia rule uses ";" between declarations
# (a "," there is invalid CSS and silently drops the rest of the rule).
css = """
.mybot {display:flex;flex-direction:column}
.mesg {padding:5px;margin-bottom:5px;border-radius:5px;width:75%}
.mesg.user {background-color:lightblue;color:white}
.mesg.alicia {background-color:orange;color:white;align-self:self-end}
.footer {display:none !important}
"""

text = grad.Textbox(placeholder="Lets chat")

grad.Interface(
    fn=converse,
    theme="default",
    inputs=[text, "state"],    # "state" carries chat_history between calls
    outputs=["html", "state"],
    css=css,
).launch()
 
395
  #-----------------------------------------------------------------------------------
396
  # 17. Chatbot/Dialog Bot: a simple bot named Alicia that is based on the Microsoft DialoGPT model .
397
 
398
+ # from transformers import AutoModelForCausalLM, AutoTokenizer,BlenderbotForConditionalGeneration
399
+ # import torch
400
+
401
+ # chat_tkn = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
402
+ # mdl = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
403
+
404
+ # #chat_tkn = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
405
+ # #mdl = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
406
+
407
+ # def converse(user_input, chat_history=[]):
408
+ # user_input_ids = chat_tkn(user_input + chat_tkn.eos_token, return_tensors='pt').input_ids
409
+ # # keep history in the tensor
410
+ # bot_input_ids = torch.cat([torch.LongTensor(chat_history), user_input_ids], dim=-1)
411
+ # # get response
412
+ # chat_history = mdl.generate(bot_input_ids, max_length=1000, pad_token_id=chat_tkn.eos_token_id).tolist()
413
+ # print (chat_history)
414
+ # response = chat_tkn.decode(chat_history[0]).split("<|endoftext|>")
415
+ # print("starting to print response")
416
+ # print(response)
417
+ # # html for display
418
+ # html = "<div class='mybot'>"
419
+ # for x, mesg in enumerate(response):
420
+ # if x%2!=0 :
421
+ # mesg="Alicia:"+mesg
422
+ # clazz="alicia"
423
+ # else :
424
+ # clazz="user"
425
+ # print("value of x")
426
+ # print(x)
427
+ # print("message")
428
+ # print (mesg)
429
+ # html += "<div class='mesg {}'> {}</div>".format(clazz, mesg)
430
+ # html += "</div>"
431
+ # print(html)
432
 
433
+ # return html, chat_history
434
 
435
+ # import gradio as grad
436
+
437
+ # css = """
438
+ # .mychat {display:flex;flex-direction:column}
439
+ # .mesg {padding:5px;margin-bottom:5px;border-radius:5px;width:75%}
440
+ # .mesg.user {background-color:lightblue;color:white}
441
+ # .mesg.alicia {background-color:orange;color:white,align-self:self-end}
442
+ # .footer {display:none !important}
443
+ # """
444
+
445
+ # text=grad.Textbox(placeholder="Lets chat")
446
+
447
+ # grad.Interface(fn=converse,
448
+ # theme="default",
449
+ # inputs=[text, "state"],
450
+ # outputs=["html", "state"],
451
+ # css=css).launch()
452
+
453
+ #-----------------------------------------------------------------------------------
454
+ # 18. Code and Code Comment Generation
455
+
456
+ # CodeGen is a language model that converts basic English prompts into code that can be executed.
457
+ # Instead of writing code yourself, you describe what the code should do using natural language, and
458
+ # the machine writes the code for you based on what you’ve described.
459
+
460
+ from transformers import AutoTokenizer, AutoModelForCausalLM
461
  import gradio as grad
462
 
463
+ codegen_tkn = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
464
+ mdl = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
465
+
466
def codegen(intent, max_length=128):
    """Generate Python source code from a natural-language description.

    Parameters:
        intent: English description of what the code should do, e.g.
            "write a function which takes 2 numbers and returns the larger".
        max_length: upper bound (in tokens, prompt included) on the generated
            sequence; parameterized so callers can request longer completions
            (default 128 preserves the original behavior).

    Returns:
        The generated code as a plain string (special tokens stripped).
    """
    input_ids = codegen_tkn(intent, return_tensors="pt").input_ids
    # Uses generate()'s default decoding settings; output is truncated at
    # max_length tokens.
    gen_ids = mdl.generate(input_ids, max_length=max_length)
    return codegen_tkn.decode(gen_ids[0], skip_special_tokens=True)
473
+
474
+ output=grad.Textbox(lines=1, label="Generated Python Code", placeholder="")