interactive-symbolic-music committed
Commit 049cfd4 · 1 Parent(s): 836c70c

moved to github.io

app.py CHANGED
@@ -57,10 +57,8 @@ def update_musescore_image(selected_prompt):
     elif selected_prompt == "example 6":
         return "samples/diy_examples/example6/example6.jpg"
 
-
-# Model for generating music (example)
+# Model for generating music
 def generate_music(prompt, tempo, num_samples=1, mode="example", rhythm_control="Yes"):
-
     ldm_model = init_ldm_model(params_chord_lsh_cond, debug_mode=False)
     model = Diffpro_SDF.load_trained(ldm_model, model_path).to(device)
     sampler = SDFSampler(model.ldm, 64, 64, is_autocast=False, device=device, debug_mode=False)
@@ -101,7 +99,6 @@ def generate_music(prompt, tempo, num_samples=1, mode="example", rhythm_control=
         if background_condition[:,4:6,:,:].min()>=0:
             full_lsh_roll = extend_piano_roll(background_condition[i,4:6,:,:].cpu().numpy())
         midi_file = piano_roll_to_midi(full_roll, full_chd_roll, full_lsh_roll, bpm=tempo)
-        # filename = f'DDIM_w_rhythm_onset_0to10_{i}_edit_x0_and_eps'+'.mid'
         filename = f"output_{i}.mid"
         save_midi(midi_file, filename)
         subprocess.Popen(['timidity',f'output_{i}.mid','-Ow','-o',f'output_{i}.wav']).communicate()
@@ -126,129 +123,16 @@ def visualize_midi(midi):
     plt.savefig(output_image_path)
     return output_image_path
 
-def plot_rhythm(rhythm_str, label):
-    if rhythm_str=="null rhythm":
-        return None
-    fig, ax = plt.subplots(figsize=(6, 2))
-
-    # Ensure it's a 16-bit string
-    rhythm_str = rhythm_str[:16]
-
-    # Convert string to a list of 0s and 1s
-    rhythm = [0 if bit=="0" else 1 for bit in rhythm_str]
-
-    # Define the x axis for the 16 sixteenth notes
-    x = list(range(1, 17))  # 1 to 16 sixteenth notes
-
-    # Plot each note (1 as filled circle, 0 as empty circle)
-    for i, bit in enumerate(rhythm):
-        if bit == 1:
-            ax.scatter(i + 1, 1, color='black', s=100, label="Note" if i == 0 else "")
-        else:
-            ax.scatter(i + 1, 1, edgecolor='black', facecolor='none', s=100, label="Rest" if i == 0 else "")
-
-    # Distinguish groups of 4 using vertical dashed lines (no solid grid lines)
-    for i in range(4, 17, 4):
-        ax.axvline(x=i + 0.5, color='grey', linestyle='--')
-
-    # Remove solid vertical grid lines by setting the grid off
-    ax.grid(False)
-
-    # Formatting the plot
-    ax.set_xlim(0.5, 16.5)
-    ax.set_ylim(0.8, 1.2)
-    ax.set_xticks(x)
-    ax.set_yticks([])
-    ax.set_xlabel("16th Notes")
-    ax.set_title("Rhythm Pattern")
-
-    fig.savefig(f'samples/diy_examples/rhythm_plot_{label}.png')
-    plt.close(fig)
-    return f'samples/diy_examples/rhythm_plot_{label}.png'
-
-def adjust_rhythm_string(s):
-    # Truncate if longer than 16 characters
-    if len(s) > 16:
-        return s[:16]
-    # Pad with zeros if shorter than 16 characters
-    else:
-        return s.ljust(16, '0')
-def rhythm_string_to_array(s):
-    # Ensure the string is 16 characters long
-    s = s[:16].ljust(16, '0')  # Truncate or pad with '0' to make it 16 characters
-    # Convert to numpy array, treating non-'0' as '1'
-    arr = np.array([1 if char != '0' else 0 for char in s], dtype=int)
-    arr = arr*np.array([3,1,2,1,3,1,2,1,3,1,2,1,3,1,2,1])
-    print(arr)
-    return arr
-
 # Gradio main function
 def generate_from_example(prompt):
-    midi_output, audio_output, midi = generate_music(prompt, tempo=80, mode="example", rhythm_control=False)
+    midi_output, audio_output, midi = generate_music(prompt, tempo=80, mode="example", rhythm_control="No")
     piano_roll_image = visualize_midi(midi)
     return audio_output, piano_roll_image
 
-def generate_diy(m1_chord, m2_chord, m3_chord, m4_chord,
-                 m1_rhythm, m2_rhythm, m3_rhythm, m4_rhythm, tempo):
-    print("\n\n\n",m1_chord,type(m1_chord), "\n\n\n")
-    test_chd_roll = np.concatenate([np.tile(CHORD_DICTIONARY[m1_chord], (16, 1)),
-                                    np.tile(CHORD_DICTIONARY[m2_chord], (16, 1)),
-                                    np.tile(CHORD_DICTIONARY[m3_chord], (16, 1)),
-                                    np.tile(CHORD_DICTIONARY[m4_chord], (16, 1))])
-    rhythms = [m1_rhythm, m2_rhythm, m3_rhythm, m4_rhythm]
-
-    chd_roll = np.concatenate([test_chd_roll[np.newaxis,:,:], test_chd_roll[np.newaxis,:,:]], axis=0)
-
-    chd_roll = circular_extend(chd_roll)
-    chd_roll = -chd_roll-1
-
-    real_chd_roll = chd_roll
-
-    melody_roll = -np.ones_like(chd_roll)
-
-    if "null rhythm" not in rhythms:
-        rhythm_full = []
-        for i in range(len(rhythms)):
-            rhythm = adjust_rhythm_string(rhythms[i])
-            rhythm = rhythm_string_to_array(rhythm)
-            rhythm_full.append(rhythm)
-        rhythm_full = np.concatenate(rhythm_full, axis=0)
-
-        onset_roll = test_chd_roll*rhythm_full[:, np.newaxis]
-        sustain_roll = np.zeros_like(onset_roll)
-        no_onset_pos = np.all(onset_roll == 0, axis=-1)
-        sustain_roll[no_onset_pos] = test_chd_roll[no_onset_pos]
-
-        real_chd_roll = np.concatenate([onset_roll[np.newaxis,:,:], sustain_roll[np.newaxis,:,:]], axis=0)
-        real_chd_roll = circular_extend(real_chd_roll)
-
-    background_condition = np.concatenate([real_chd_roll, chd_roll, melody_roll], axis=0)
-
-    midi_output, audio_output, midi = generate_music(background_condition, tempo, mode="diy")
-    piano_roll_image = visualize_midi(midi)
-    return midi_output, audio_output, piano_roll_image
-
 # Prompt list
 prompt_list = ["example 1", "example 2", "example 3", "example 4"]
-rhythm_list = ["null rhythm", "1010101010101010", "1011101010111010","1111101010111010","1010001010101010","1010101000101010"]
-
 
 custom_css = """
-.custom-row1 {
-    background-color: #fdebd0;
-    padding: 10px;
-    border-radius: 5px;
-}
-.custom-row2 {
-    background-color: #d1f2eb;
-    padding: 10px;
-    border-radius: 5px;
-}
-.custom-grey {
-    background-color: #f0f0f0;
-    padding: 10px;
-    border-radius: 5px;
-}
 .custom-purple {
     background-color: #d7bde2;
     padding: 10px;
@@ -259,227 +143,38 @@ custom_css = """
 }
 """
 
-
 with gr.Blocks(css=custom_css) as demo:
-    gr.Markdown("# <div style='text-align: center;font-size:40px'> Efficient Fine-Grained Guidance for Diffusion-Based Symbolic Music Generation <div style='text-align: center;'>")
+    gr.Markdown("# <div style='text-align: center;font-size:40px'> Efficient Fine-Grained Guidance for Diffusion Model Based Symbolic Music Generation <div style='text-align: center;'>")
 
-    gr.Markdown("<span style='font-size:25px;'> We introduce **Fine-Grained Guidance (FG)**, an efficient approach for symbolic music generation using **diffusion models**. Our method enhances guidance through:\
-                \n &emsp; (1) Fine-grained conditioning during training,\
-                \n &emsp; (2) Fine-grained control during the diffusion sampling process.\
-                \n In particular, **sampling control** ensures tonal accuracy in every generated sample, allowing our model to produce music with high precision, consistent rhythmic patterns,\
-                and even stylistic variations that align with user intent.<span>")
-    gr.Markdown("<span style='font-size:25px;color: red'> At the bottom of this page, we provide an interactive space for you to try our model by yourself! <span>")
+    gr.Markdown("<div style='text-align: center;font-size:20px'>Tingyu Zhu<sup>*</sup>, Haoyu Liu<sup>*</sup>, Ziyu Wang, Zhimin Jiang, Zeyu Zheng</div>")
+    gr.Markdown("<div style='text-align: center;font-size:20px'><a href='https://arxiv.org/abs/2410.08435'>[Paper]</a> <a href='https://github.com/huajianduzhuo-code/FGG-music-code'>[Code Repo]</a></div>")
 
+    gr.Markdown("<span style='font-size:25px;'> For detailed information and demonstrations of our method, please visit our [GitHub Pages site](https://huajianduzhuo-code.github.io/FGG-diffusion-music/) to explore:\
+                \n &emsp; 1. Accompaniment Generation given Melody and Chord\
+                \n &emsp; 2. Style-Controlled Music Generation\
+                \n &emsp; 3. Demonstrating the Effectiveness of Sampling Control by Comparison</span>")
 
-    gr.Markdown("\n\n\n")
-    gr.Markdown("# 1. Accompaniment Generation given Melody and Chord")
-    gr.Markdown("<span style='font-size:20px;'> In each example, the left column displays the melody provided as inputs to the model.\
-                The right column showcases music samples generated by the model.<span>")
-
-    with gr.Column(elem_classes="custom-row1"):
-        gr.Markdown("## Example 1")
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
-                example1_mel = gr.Audio(value="samples/diy_examples/example1/example_1_mel.wav", label="Melody", scale = 5)
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
-                example1_audio = gr.Audio(value="samples/diy_examples/example1/sample1.wav", label="Generated Accompaniment", scale = 5)
-
-    with gr.Column(elem_classes="custom-row2"):
-        gr.Markdown("## Example 2")
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
-                example1_mel = gr.Audio(value="samples/diy_examples/example2/example_2_mel.wav", label="Melody", scale = 5)
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
-                example1_audio = gr.Audio(value="samples/diy_examples/example2/sample1.wav", label="Generated Accompaniment", scale = 5)
-
-    with gr.Column(elem_classes="custom-row1"):
-        gr.Markdown("## Example 3")
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
-                example1_mel = gr.Audio(value="samples/diy_examples/example3/example_3_mel.wav", label="Melody", scale = 5)
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
-                example1_audio = gr.Audio(value="samples/diy_examples/example3/sample1.wav", label="Generated Accompaniment", scale = 5)
-
-    with gr.Column(elem_classes="custom-row2"):
-        gr.Markdown("## Example 4")
-        with gr.Row():
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> With the following melody as condition <span>")
-                example1_mel = gr.Audio(value="samples/diy_examples/example4/example_4_mel.wav", label="Melody", scale = 5)
-            with gr.Column():
-                gr.Markdown("<span style='font-size:20px;'> Generated Accompaniments <span>")
-                example1_audio = gr.Audio(value="samples/diy_examples/example4/sample1.wav", label="Generated Accompaniment", scale = 5)
-
-    gr.HTML("<div style='height: 50px;'></div>")
-    gr.Markdown("# \n\n\n")
-    gr.Markdown("# 2. Style-Controlled Music Generation")
-    gr.Markdown("<span style='font-size:20px;'>Our approach enables controllable stylization in music generation. The sampling control is able to\
-                ensure that all generated notes strictly adhere to the target musical style's scale.\
-                This allows the model to generate music in specific styles — even those that were not present in \
-                the training data.<span>")
-    gr.Markdown("<span style='font-size:20px;'> Below, we demonstrate several examples of style-controlled music generation for:\
-                \n &emsp; (1) Dorian Mode: (with scale being A-B-C-D-E-F#-G);\
-                \n &emsp; (2) Chinese Style: (with scale being C-D-E-G-A). <span>")
-
-    with gr.Column(elem_classes="custom-row1"):
-        gr.Markdown("## Dorian Mode")
-        gr.Markdown("<span style='font-size:20px;'> The following are two examples generated by our method <span>")
-        with gr.Row():
-            with gr.Column(elem_classes="custom-grey"):
-                gr.Markdown("<span style='font-size:20px;'> Example 1 <span>")
-                example1_mel = gr.Audio(value="samples/different_styles/dorian_1.wav", scale = 5)
-            with gr.Column(elem_classes="custom-grey"):
-                gr.Markdown("<span style='font-size:20px;'> Example 2 <span>")
-                example1_audio = gr.Audio(value="samples/different_styles/dorian_2.wav", scale = 5)
-
-    with gr.Column(elem_classes="custom-row2"):
-        gr.Markdown("## Chinese Style")
-        gr.Markdown("<span style='font-size:20px;'> The following are two examples generated by our method <span>")
-        with gr.Row():
-            with gr.Column(elem_classes="custom-grey"):
-                gr.Markdown("<span style='font-size:20px;'> Example 1 <span>")
-                example1_mel = gr.Audio(value="samples/different_styles/chinese_1.wav", scale = 5)
-            with gr.Column(elem_classes="custom-grey"):
-                gr.Markdown("<span style='font-size:20px;'> Example 2 <span>")
-                example1_audio = gr.Audio(value="samples/different_styles/chinese_2.wav", scale = 5)
-
     gr.HTML("<div style='height: 50px;'></div>")
     gr.Markdown("\n\n\n")
-    gr.Markdown("# 3. Demonstrating the Effectiveness of Sampling Control by Comparison")
-
-    gr.Markdown("<span style='font-size:20px;'> We demonstrate the impact of sampling control in an **accompaniment generation** task, given a melody and chord progression.\
-                \n Each example generates accompaniments with and without sampling control using the same random seed, ensuring that the two results are comparable.\
-                \n Sampling control effectively removes or replaces harmonically conflicting notes, ensuring tonal consistency.\
-                \n We provide music sheets and audio files for both versions.<span>")
-
-    gr.Markdown("<span style='font-size:20px;'> Comparison of the results indicates that sampling control not only eliminates out-of-key notes but also enhances \
-                the overall coherence and harmonic consistency of the accompaniments.\
-                This highlights the effectiveness of our approach in maintaining musical coherence. <span>")
-
-
-    with gr.Column(elem_classes="custom-row1"):
-        gr.Markdown("## Example 1")
-
-        with gr.Row(elem_classes="custom-grey"):
-            gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
-            with gr.Column(scale=2, min_width=10, ):
-                gr.Markdown("Melody Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=10, ):
-                gr.Markdown("Melody Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale = 1, min_width=10)
-
-        gr.Markdown("## Generated Accompaniments")
-        with gr.Row(elem_classes="custom-grey"):
-            gr.Markdown("<span style='font-size:20px;'> Without sampling control<span>")
-            with gr.Column(scale=2, min_width=300):
-                gr.Markdown("Music Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.wav", scale = 1, min_width=10)
-        gr.Markdown("\n\n\n")
-        with gr.Row(elem_classes="custom-grey"):
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("<span style='font-size:20px;'>With sampling control<span>")
-            with gr.Column(scale=2, min_width=300):
-                gr.Markdown("Music Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_acc_control.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_control.wav", scale = 1, min_width=10)
-
-
-    with gr.Column(elem_classes="custom-row2"):
-        gr.Markdown("## Example 2")
-
-        with gr.Row(elem_classes="custom-grey"):
-            gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
-            with gr.Column(scale=2, min_width=10, ):
-                gr.Markdown("Melody Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=10, ):
-                gr.Markdown("Melody Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale = 1, min_width=10)
-
-        gr.Markdown("## Generated Accompaniments")
-        with gr.Row(elem_classes="custom-grey"):
-            gr.Markdown("<span style='font-size:20px;'> Without sampling control<span>")
-            with gr.Column(scale=2, min_width=300):
-                gr.Markdown("Music Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_acc_uncontrol.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_acc_uncontrol.wav", scale = 1, min_width=10)
-        gr.Markdown("\n\n\n")
-        with gr.Row(elem_classes="custom-grey"):
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("<span style='font-size:20px;'>With sampling control<span>")
-            with gr.Column(scale=2, min_width=300):
-                gr.Markdown("Music Sheet")
-                example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_2_acc_control.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-            with gr.Column(scale=1, min_width=150):
-                gr.Markdown("Audio")
-                example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_2_acc_control.wav", scale = 1, min_width=10)
-
-    # with gr.Row():
-    #     with gr.Column(scale=1, min_width=300, elem_classes="custom-row1"):
-    #         gr.Markdown("## Example 1")
-    #         gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
-    #         example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #         # Audio component to play the audio
-    #         example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale = 1, min_width=10)
-
-    #         gr.Markdown("## Generated Accompaniments")
-    #         with gr.Row():
-    #             with gr.Column(scale=1, min_width=150):
-    #                 gr.Markdown("<span style='font-size:20px;'> without sampling control<span>")
-    #                 example1_sheet = gr.Image(value="samples/control_vs_uncontrol/sample_1.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #                 example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.wav", scale = 1, min_width=10)
-    #             with gr.Column(scale=1, min_width=150):
-    #                 gr.Markdown("<span style='font-size:20px;'> with sampling control<span>")
-    #                 example1_sheet = gr.Image(value="samples/control_vs_uncontrol/sample_1.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #                 example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_control.wav", scale = 1, min_width=10)
-    #     with gr.Column(scale=1, min_width=300, elem_classes="custom-row2"):
-    #         gr.Markdown("## Example 2")
-    #         gr.Markdown("<span style='font-size:20px;'> With pre-defined melody and chord as follows<span>")
-    #         example1_sheet = gr.Image(value="samples/control_vs_uncontrol/example_1_mel_chd.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #         # Audio component to play the audio
-    #         example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_mel_chd.wav", label="Melody, wav", waveform_options=gr.WaveformOptions(show_recording_waveform=False), scale = 1, min_width=10)
-
-    #         gr.Markdown("## Generated Accompaniments")
-    #         with gr.Row():
-    #             with gr.Column(scale=1, min_width=150):
-    #                 gr.Markdown("<span style='font-size:20px;'> without sampling control<span>")
-    #                 example1_sheet = gr.Image(value="samples/control_vs_uncontrol/sample_1.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #                 example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_uncontrol.wav", scale = 1, min_width=10)
-    #             with gr.Column(scale=1, min_width=150):
-    #                 gr.Markdown("<span style='font-size:20px;'> with sampling control<span>")
-    #                 example1_sheet = gr.Image(value="samples/control_vs_uncontrol/sample_1.jpg", label="Music Sheet of Melody and Chord", scale=1, min_width=10)
-    #                 example1_melody = gr.Audio(value="samples/control_vs_uncontrol/example_1_acc_control.wav", scale = 1, min_width=10)
-
-
+    gr.Markdown("# <span style='color: red;'> Interactive Demo </span>")
+    gr.Markdown(
+        "<span style='font-size:20px;'>"
+        "🎵 Try out our interactive tool to generate music with our model!<br>"
+        "You can create new accompaniments conditioned on a given melody and chord progression."
+        "</span>"
+    )
 
-
-
-    ''' Try to generate by users '''
-    gr.HTML("<div style='height: 50px;'></div>")
-    gr.Markdown("\n\n\n")
-    gr.Markdown("# <span style='color: red;'> 4. DIY in real time! </span>")
-    gr.Markdown("<span style='font-size:20px;'> Here is an interactive tool for you to try our model and generate by yourself.\
-                You can generate new accompaniments for given melody and chord conditions <span>")
-
-    gr.Markdown("### <span style='color: blue;'> Currently this space is supported with Hugging Face CPU and on average,\
-                it takes about 15 seconds to generate a 4-measure music piece. However, if other users are generating\
-                music at the same time, one may enter a queue, which could slow down the process significantly.\
-                If that happens, feel free to refresh the page. We appreciate your patience and understanding.\
-                </span>")
+    gr.Markdown(
+        "<span style='color:blue; font-size:20px;'>"
+        "⚠️ This Space currently runs on a Hugging Face-provided CPU. On average, it takes ~15 seconds to generate a 4-measure music segment.<br>"
+        "If multiple users are generating at the same time, you may enter a queue, which can cause delays.<br><br>"
+        "🚀 On our local server (NVIDIA RTX 6000 Ada GPU), the same generation takes only 0.4 seconds.<br><br>"
+        "To speed things up, you can: <br>"
+        "• 🔁 Fork this Space and select a different hardware configuration<br>"
+        "• 🧑‍💻 Clone our <a href='https://github.com/huajianduzhuo-code/FGG-music-code'>[Code Repo]</a> and run the generation notebooks locally after installing dependencies and downloading the model weights."
+        "</span>"
+    )
 
     with gr.Column(elem_classes="custom-purple"):
         gr.Markdown("### Select an example to generate music given melody and chord condition")
@@ -502,7 +197,6 @@ with gr.Blocks(css=custom_css) as demo:
         outputs=[audio_output, piano_roll_output]
     )
 
-
 # Launch Gradio interface
 if __name__ == "__main__":
     demo.launch()
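Note on the deleted DIY helpers above: `adjust_rhythm_string` and `rhythm_string_to_array` reduce a user-typed rhythm string to a 16-step onset array weighted by metrical position. A condensed, self-contained sketch of that logic (the `METRICAL_WEIGHTS` name is introduced here for clarity; the weight values are copied from the deleted code):

```python
import numpy as np

# Strong/weak 16th-note grid per 4/4 measure, as in the deleted code:
# beat heads get weight 3, mid-beat positions get 2, off-beats get 1.
METRICAL_WEIGHTS = np.array([3, 1, 2, 1] * 4)

def rhythm_string_to_array(s: str) -> np.ndarray:
    """Truncate/pad a rhythm string to 16 chars, binarize, weight by meter."""
    s = s[:16].ljust(16, "0")                             # exactly 16 sixteenth notes
    onsets = np.array([c != "0" for c in s], dtype=int)   # any non-'0' char is an onset
    return onsets * METRICAL_WEIGHTS

print(rhythm_string_to_array("1011101010111010"))
# -> [3 0 2 1 3 0 2 0 3 0 2 1 3 0 2 0]
```

In the deleted `generate_diy`, this weighted array multiplied the chord roll to mark onset positions, and every step without an onset sustained the chord instead.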
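The removed section-3 text states that sampling control "removes or replaces harmonically conflicting notes, ensuring tonal consistency." As a purely illustrative sketch of that constraint (not the repository's implementation — see the linked FGG code repo for that), enforcing a scale amounts to masking out-of-scale pitch classes in a piano roll:

```python
import numpy as np

# Hypothetical scale mask: zero out piano-roll rows whose pitch class lies
# outside an allowed scale (here the Chinese pentatonic C-D-E-G-A mentioned
# in the removed demo text). Shapes and names are illustrative assumptions.
ALLOWED_PITCH_CLASSES = {0, 2, 4, 7, 9}  # C, D, E, G, A

def mask_out_of_scale(piano_roll: np.ndarray) -> np.ndarray:
    """piano_roll: (128, T) array; returns it with out-of-scale rows zeroed."""
    keep = np.array([p % 12 in ALLOWED_PITCH_CLASSES for p in range(128)])
    return piano_roll * keep[:, None]
```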
generation/__pycache__/gen_utils.cpython-39.pyc CHANGED
Binary files a/generation/__pycache__/gen_utils.cpython-39.pyc and b/generation/__pycache__/gen_utils.cpython-39.pyc differ
 
model/__pycache__/__init__.cpython-39.pyc CHANGED
Binary files a/model/__pycache__/__init__.cpython-39.pyc and b/model/__pycache__/__init__.cpython-39.pyc differ
 
model/__pycache__/latent_diffusion.cpython-39.pyc CHANGED
Binary files a/model/__pycache__/latent_diffusion.cpython-39.pyc and b/model/__pycache__/latent_diffusion.cpython-39.pyc differ
 
model/__pycache__/model_sdf.cpython-39.pyc CHANGED
Binary files a/model/__pycache__/model_sdf.cpython-39.pyc and b/model/__pycache__/model_sdf.cpython-39.pyc differ
 
model/__pycache__/sampler_sdf.cpython-39.pyc CHANGED
Binary files a/model/__pycache__/sampler_sdf.cpython-39.pyc and b/model/__pycache__/sampler_sdf.cpython-39.pyc differ
 
model/architecture/__pycache__/unet.cpython-39.pyc CHANGED
Binary files a/model/architecture/__pycache__/unet.cpython-39.pyc and b/model/architecture/__pycache__/unet.cpython-39.pyc differ
 
model/architecture/__pycache__/unet_attention.cpython-39.pyc CHANGED
Binary files a/model/architecture/__pycache__/unet_attention.cpython-39.pyc and b/model/architecture/__pycache__/unet_attention.cpython-39.pyc differ
 
output_0.mid CHANGED
Binary files a/output_0.mid and b/output_0.mid differ
 
output_0.wav CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f9abffb8b039f86161f025cab6419eecf93ec741ea67e66964ca8e79d333c9d4
3
- size 2469720
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15dcddbf23bcae2e04eb588ef0023e0e610d4b4f03709c5a777b4141a24d160e
3
+ size 2772208
piano_roll.png CHANGED

Git LFS Details (before)

  • SHA256: cf4d433689089c7895ed3ebf569e2dda9284a8d443481f6d8aa9f9575089cc37
  • Pointer size: 130 Bytes
  • Size of remote file: 16.4 kB

Git LFS Details (after)

  • SHA256: 475f8a03707e99982a584e8cbe515cd6400233ebce2ca9447fbe76c27a8623ee
  • Pointer size: 130 Bytes
  • Size of remote file: 18.5 kB
train/__pycache__/__init__.cpython-39.pyc CHANGED
Binary files a/train/__pycache__/__init__.cpython-39.pyc and b/train/__pycache__/__init__.cpython-39.pyc differ
 
train/__pycache__/learner.cpython-39.pyc CHANGED
Binary files a/train/__pycache__/learner.cpython-39.pyc and b/train/__pycache__/learner.cpython-39.pyc differ
 
train/__pycache__/train_params.cpython-39.pyc CHANGED
Binary files a/train/__pycache__/train_params.cpython-39.pyc and b/train/__pycache__/train_params.cpython-39.pyc differ