loubnabnl committed
Commit a799099 · 1 Parent(s): cb05bec

Update app.py

Files changed (1)
  1. app.py +34 -2
app.py CHANGED
@@ -1,9 +1,10 @@
 import json
+import os
 import pandas as pd
 import requests
 import threading
 import streamlit as st
-
+from datasets import load_dataset, load_metric
 
 MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"]
 GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"]
@@ -14,6 +15,16 @@ def load_examples():
     with open("utils/examples.json", "r") as f:
         examples = json.load(f)
     return examples
+
+
+def load_evaluation():
+    # load task 2 of HumanEval and code_eval_metric
+    os.environ["HF_ALLOW_CODE_EVAL"] = "1"
+    human_eval = load_dataset("openai_humaneval")
+    entry_point = f"check({human_eval['test'][2]['entry_point']})"
+    test_func = "\n" + human_eval["test"][2]["test"] + "\n" + entry_point
+    code_eval = load_metric("code_eval")
+    return code_eval, test_func
 
 
 def read_markdown(path):
@@ -111,7 +122,28 @@ read_markdown(f"architectures/{selected_model.lower()}.md")
 st.subheader("3 - Code model evaluation")
 read_markdown("evaluation/intro.md")
 read_markdown("evaluation/demo_humaneval.md")
-
+## quiz
+st.markdown("Below you can try solving this problem or visualize the solution of CodeParrot:")
+with open("evaluation/problem.md", "r") as f:
+    problem = f.read()
+with open("evaluation/solution.md", "r") as f:
+    solution = f.read()
+
+candidate_solution = st.text_area(
+    "Complete the problem:",
+    value=problem,
+    height=200,
+).strip()
+if st.button("Test my solution", key=2):
+    with st.spinner("Testing..."):
+        code_eval, test_func = load_evaluation()
+        test_cases = [test_func]
+        candidates = [[candidate_solution]]
+        pass_at_k, _ = code_eval.compute(references=test_cases, predictions=candidates)
+        st.markdown(f"Your pass@1 is {int(pass_at_k['pass@1'])}")
+if st.button("Show model solution", key=3):
+    st.markdown(solution)
+
 # Code generation
 st.subheader("4 - Code generation ✨")
 read_markdown("generation/intro.md")