Zeroxdesignart commited on
Commit
302cf9d
·
verified ·
1 Parent(s): b863906

Create model.py

Browse files
Files changed (1) hide show
  1. model.py +71 -0
model.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import torch
3
+ from music21 import converter, instrument, note, chord, stream
4
+ import gradio as gr
5
+
6
+ # Load Models
7
+ melody_model = AutoModelForCausalLM.from_pretrained("your_melody_model") # Replace with actual model name
8
+ harmony_model = AutoModelForCausalLM.from_pretrained("your_harmony_model") # Replace with actual model name
9
+ rhythm_model = AutoModelForCausalLM.from_pretrained("your_rhythm_model") # Replace with actual model name
10
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
11
+
12
+ # Define functions for each step
13
+
def generate_melody(prompt, length=50):
    """Generate a melody token sequence from a text prompt.

    Args:
        prompt: Text seeding the melody model.
        length: Maximum total sequence length in tokens — note this
            includes the prompt tokens, not just new tokens.

    Returns:
        The decoded melody sequence as a plain string.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask and an explicit pad_token_id: GPT-2-style
    # models have no pad token, and omitting these makes generate()
    # warn and fall back to guessed padding behavior.
    with torch.no_grad():
        melody_output = melody_model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=length,
            pad_token_id=tokenizer.eos_token_id,
        )
    melody_notes = tokenizer.decode(melody_output[0], skip_special_tokens=True)
    return melody_notes
def generate_harmony(melody_sequence, length=100):
    """Generate harmonic support conditioned on a melody sequence.

    Args:
        melody_sequence: Decoded melody string from generate_melody().
        length: Maximum total sequence length in tokens (prompt included).

    Returns:
        The decoded harmony sequence as a plain string.
    """
    # Build the conditioning input as melody tokens followed by the
    # "add harmony" instruction tokens. Use the same tokenizer call
    # style for both halves for consistency.
    melody_ids = tokenizer(melody_sequence, return_tensors="pt")["input_ids"]
    instruction_ids = tokenizer("add harmony", return_tensors="pt")["input_ids"]
    harmony_input = torch.cat([melody_ids, instruction_ids], dim=1)
    # Explicit attention mask + pad_token_id: GPT-2-style tokenizers
    # have no pad token, so generate() needs these supplied.
    with torch.no_grad():
        harmony_output = harmony_model.generate(
            harmony_input,
            attention_mask=torch.ones_like(harmony_input),
            max_length=length,
            pad_token_id=tokenizer.eos_token_id,
        )
    harmony_notes = tokenizer.decode(harmony_output[0], skip_special_tokens=True)
    return harmony_notes
def generate_rhythm(harmony_sequence, length=50):
    """Add rhythmic structure conditioned on a harmony sequence.

    Args:
        harmony_sequence: Decoded harmony string from generate_harmony().
        length: Maximum total sequence length in tokens (prompt included).

    Returns:
        The decoded rhythm sequence as a plain string.
    """
    # Conditioning input: harmony tokens followed by the "add rhythm"
    # instruction tokens, built with a single consistent tokenizer style.
    harmony_ids = tokenizer(harmony_sequence, return_tensors="pt")["input_ids"]
    instruction_ids = tokenizer("add rhythm", return_tensors="pt")["input_ids"]
    rhythm_input = torch.cat([harmony_ids, instruction_ids], dim=1)
    # Explicit attention mask + pad_token_id — required for GPT-2-style
    # tokenizers that ship without a pad token.
    with torch.no_grad():
        rhythm_output = rhythm_model.generate(
            rhythm_input,
            attention_mask=torch.ones_like(rhythm_input),
            max_length=length,
            pad_token_id=tokenizer.eos_token_id,
        )
    rhythm_sequence = tokenizer.decode(rhythm_output[0], skip_special_tokens=True)
    return rhythm_sequence
def create_midi(melody, harmony, rhythm):
    """Render melody, harmony, and rhythm token strings to a MIDI file.

    Tokens that are decimal digit strings are treated as MIDI pitch
    numbers; the literal token "rest" becomes a rest. Any other token
    is silently ignored.

    Args:
        melody: Whitespace-separated melody token string.
        harmony: Whitespace-separated harmony token string.
        rhythm: Whitespace-separated rhythm token string.

    Returns:
        Path of the written MIDI file ("generated_music.mid").
    """
    composition = stream.Stream()

    for part in (melody, harmony, rhythm):
        for token in part.split():
            if token.isdigit():
                pitch = int(token)
                # Valid MIDI pitches are 0-127. isdigit() admits any
                # non-negative integer, so skip out-of-range values
                # instead of letting music21 raise mid-conversion.
                if pitch > 127:
                    continue
                midi_note = note.Note(pitch)
                midi_note.quarterLength = 0.5  # fixed eighth-note grid
                composition.append(midi_note)
            elif token == "rest":
                rest_note = note.Rest()
                rest_note.quarterLength = 0.5
                composition.append(rest_note)

    midi_fp = "generated_music.mid"
    composition.write('midi', fp=midi_fp)
    return midi_fp
# Full generation function
def generate_music(prompt, length=50):
    """Run the full melody -> harmony -> rhythm pipeline and write a MIDI file.

    Args:
        prompt: Text description of the desired music.
        length: Maximum generation length forwarded to each stage.

    Returns:
        Path of the generated MIDI file.
    """
    melody_seq = generate_melody(prompt, length)
    harmony_seq = generate_harmony(melody_seq, length)
    rhythm_seq = generate_rhythm(harmony_seq, length)
    return create_midi(melody_seq, harmony_seq, rhythm_seq)
# Set up Gradio interface
iface = gr.Interface(
    fn=generate_music,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Describe the music you want"),
        # A bare "slider" input defaults to range 0-100 starting at 0,
        # which would request zero-length generation; give it an explicit
        # range and a sensible default matching generate_music().
        gr.Slider(minimum=10, maximum=200, value=50, step=1, label="Max length (tokens)"),
    ],
    outputs=gr.File(label="Generated MIDI"),
    title="Multi-Model AI Music Generator",
    description="Generate music using a multi-model AI system that combines melody, harmony, and rhythm layers.",
)

# Launch only when executed as a script, so importing this module
# (e.g. from tests) does not start a web server.
if __name__ == "__main__":
    iface.launch()