import os
import json
from datetime import datetime, timezone
from dataclasses import dataclass
from datasets import load_dataset, Dataset
import pandas as pd
import gradio as gr
from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.hf_api import ModelInfo
from enum import Enum
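# Configuration: the org that owns the project, the Space that runs the
# analysis jobs, and the dataset where evaluation requests are stored.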
OWNER = "EnergyStarAI"
COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
requests = load_dataset("EnergyStarAI/requests_debug", split="test")
TOKEN = os.environ.get("DEBUG")
API = HfApi(token=TOKEN)
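# Benchmark tasks that can be selected in the submission form.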
tasks = ['ASR', 'Object Detection', 'Text Classification', 'Image Captioning', 'Question Answering', 'Text Generation', 'Image Classification',
'Sentence Similarity', 'Image Generation', 'Summarization']
##### Data classes needed for the leaderboard Submit Model menu. #####
@dataclass
class ModelDetails:
name: str
display_name: str = ""
symbol: str = "" # emoji
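# How the submitted weights relate to a base model: full weights, a delta, or an adapter.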
class WeightType(Enum):
Adapter = ModelDetails("Adapter")
Original = ModelDetails("Original")
Delta = ModelDetails("Delta")
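# Numeric precisions a model can be submitted in ("?" when unknown).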
class Precision(Enum):
float16 = ModelDetails("float16")
bfloat16 = ModelDetails("bfloat16")
float32 = ModelDetails("float32")
bfloat32 = ModelDetails("bfloat32")
Unknown = ModelDetails("?")
    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        if precision in ["torch.bfloat32", "bfloat32"]:
            return Precision.bfloat32
        if precision in ["torch.float32", "float32"]:
            return Precision.float32
        return Precision.Unknown
class ModelType(Enum):
PT = ModelDetails(name="pretrained", symbol="🟒")
FT = ModelDetails(name="fine-tuned", symbol="πŸ”Ά")
IFT = ModelDetails(name="instruction-tuned", symbol="β­•")
RL = ModelDetails(name="RL-tuned", symbol="🟦")
Unknown = ModelDetails(name="", symbol="?")
def to_str(self, separator=" "):
return f"{self.value.symbol}{separator}{self.value.name}"
@staticmethod
def from_str(type):
if "fine-tuned" in type or "πŸ”Ά" in type:
return ModelType.FT
if "pretrained" in type or "🟒" in type:
return ModelType.PT
if "RL-tuned" in type or "🟦" in type:
return ModelType.RL
if "instruction-tuned" in type or "β­•" in type:
return ModelType.IFT
return ModelType.Unknown
##### End of classes required by the leaderboard Submit Model menu #####
def start_compute_space():
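    """Restart the compute Space so it picks up the newly filed request."""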
API.restart_space(COMPUTE_SPACE)
return f"Okay! {COMPUTE_SPACE} should be running now!"
def get_model_size(model_info: ModelInfo, precision: str):
"""Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
try:
model_size = round(model_info.safetensors["total"] / 1e9, 3)
except (AttributeError, TypeError):
return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
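    # GPTQ checkpoints store packed quantized weights, so the raw safetensors
    # count is scaled up (factor of 8, following the original sizing heuristic).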
size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
model_size = size_factor * model_size
return model_size
def add_new_eval(
repo_id: str,
precision: str,
task: str,
):
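    """Validate the model, file a request in the requests dataset, and launch the compute Space."""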
model_owner = repo_id.split("/")[0]
model_name = repo_id.split("/")[1]
precision = precision.split(" ")[0]
current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
# Is the model info correctly filled?
try:
model_info = API.model_info(repo_id=repo_id)
except Exception:
print("Could not find information for model %s" % (model))
return
# return styled_error("Could not get your model information. Please fill it up properly.")
model_size = get_model_size(model_info=model_info, precision=precision)
print("Adding request")
requests_dset = requests.to_pandas()
request_dict = {
"model": repo_id,
"precision": precision,
"status": "PENDING",
"submitted_time": current_time,
"task": task,
"likes": model_info.likes,
"params": model_size}
#"license": license,
#"private": False,
#}
print("Writing out temp request file to %s" % temp_out_file)
df_request_dict = pd.DataFrame({'name':request_dict.keys(), 'value':request_dict.values()})
df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
    updated_dset = Dataset.from_pandas(df_final)
    updated_dset.push_to_hub("EnergyStarAI/requests_debug", split="test", token=TOKEN)
print("Starting compute space at %s " % COMPUTE_SPACE)
return start_compute_space()
def print_existing_models():
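    """Return the subset of requests whose evaluation status is COMPLETED."""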
requests = load_dataset("EnergyStarAI/requests_debug", split="test")
requests_dset = requests.to_pandas()
    model_list = requests_dset[requests_dset['status'] == 'COMPLETED']
return model_list
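# Submission portal UI: task and precision selectors, a model-name box, a
# submit button, and a table of already-evaluated models.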
with gr.Blocks() as demo:
gr.Markdown("#Energy Star Submission Portal - v.0 (2024) 🌎 πŸ’» 🌟")
gr.Markdown("## βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
gr.Markdown("## Fill out below then click **Run Analysis** to create the request file and launch the job.")
gr.Markdown("## The [Project Leaderboard](https://huggingface.co/spaces/EnergyStarAI/2024_Leaderboard) will be updated quarterly, as new models get submitted.")
with gr.Row():
with gr.Column():
task = gr.Dropdown(
choices=tasks,
label="Choose a benchmark task",
multiselect=False,
interactive=True,
)
with gr.Column():
model_name_textbox = gr.Textbox(label="Model name")
precision = gr.Dropdown(
choices=[i.value.name for i in Precision if i != Precision.Unknown],
label="Precision",
multiselect=False,
value="float16",
interactive=True,
)
with gr.Row():
with gr.Column():
submit_button = gr.Button("Run Analysis")
submission_result = gr.Markdown()
submit_button.click(
fn=add_new_eval,
inputs=[
model_name_textbox,
precision,
task,
],
outputs=submission_result,
)
with gr.Row():
gr.Dataframe(print_existing_models())
demo.launch()