sagar007 committed on
Commit
888e60b
·
verified ·
1 Parent(s): 56f9710

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -5
app.py CHANGED
@@ -6,12 +6,25 @@ from peft import PeftModel, PeftConfig
6
  # Load the fine-tuned model and tokenizer
7
  model_name = "sagar007/phi-1_5-finetuned" # Updated model path
8
  config = PeftConfig.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, torch_dtype=torch.float16, device_map="auto")
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  model = PeftModel.from_pretrained(model, model_name)
11
  tokenizer = AutoTokenizer.from_pretrained(model_name)
12
 
13
  # Create a text generation pipeline
14
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device_map="auto")
15
 
16
  def generate_text(prompt, max_length=100, temperature=0.7, top_p=0.9):
17
  """Generate text based on the input prompt."""
@@ -46,9 +59,19 @@ with gr.Blocks(css=custom_css) as iface:
46
  # Add example prompts
47
  gr.Examples(
48
  examples=[
49
- "Once upon a time in a galaxy far, far away",
50
- "The secret to happiness is",
51
- "In the year 2050, artificial intelligence has"
 
 
 
 
 
 
 
 
 
 
52
  ],
53
  inputs=input_text
54
  )
 
# Load the fine-tuned model and tokenizer.
model_name = "sagar007/phi-1_5-finetuned"  # Updated model path
config = PeftConfig.from_pretrained(model_name)

# Pick device placement and precision from what the host actually has:
# fp16 on GPU (halves memory, faster), fp32 on CPU (fp16 CPU inference is
# poorly supported for many ops and can be slower or fail outright).
if torch.cuda.is_available():
    device_map = "auto"
    torch_dtype = torch.float16
else:
    device_map = "cpu"
    torch_dtype = torch.float32

# Load the base model named in the PEFT config, then attach the fine-tuned
# adapter weights on top of it.
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    torch_dtype=torch_dtype,
    device_map=device_map,
)
model = PeftModel.from_pretrained(model, model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Create a text generation pipeline.
# NOTE: do NOT pass device_map here. `model` is already a placed model object
# (dispatched by accelerate via from_pretrained above); handing the pipeline a
# device argument makes it try to move an accelerate-dispatched model again,
# which transformers rejects with a warning/error.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
28
 
29
  def generate_text(prompt, max_length=100, temperature=0.7, top_p=0.9):
30
  """Generate text based on the input prompt."""
 
# Clickable example prompts shown beneath the input box; selecting one
# populates the `input_text` component with that prompt.
example_prompts = [
    "Explain the concept of machine learning.",
    "Write a short story about a robot learning to paint.",
    "What are some effective ways to reduce stress?",
    "Summarize the key points of climate change in simple terms.",
    "Create a step-by-step guide for making a perfect omelette.",
    "Describe the differences between classical and quantum computing.",
    "Write a motivational speech for a team starting a new project.",
    "Explain the importance of biodiversity in ecosystems.",
    "Compose a haiku about artificial intelligence.",
    "List five tips for effective time management.",
    "Describe the process of photosynthesis in layman's terms.",
    "Write a dialogue between two characters discussing the future of space exploration.",
    "Explain the concept of blockchain technology and its potential applications.",
]
gr.Examples(examples=example_prompts, inputs=input_text)