Upload 2 files

- Monster_Music_Transformer.ipynb +964 -0
- monster_music_transformer.py +809 -0
Monster_Music_Transformer.ipynb
ADDED
@@ -0,0 +1,964 @@
# Monster Music Transformer (ver. 1.0)

***

Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools

***

WARNING: This complete implementation is a functioning Artificial Intelligence model. Please exercise great humility, care, and respect. https://www.nscai.gov/

***

#### Project Los Angeles

#### Tegridy Code 2024

***
# (GPU CHECK)

#@title NVIDIA GPU check
!nvidia-smi
# (SETUP ENVIRONMENT)

#@title Install dependencies
!git clone --depth 1 https://github.com/asigalov61/Monster-MIDI-Dataset
!pip install huggingface_hub
!pip install einops
!pip install torch-summary
!apt install fluidsynth  # pip install does not work for some reason; only apt works
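Before moving on, it can help to confirm that the installs actually landed in the runtime. This optional check is not required by the pipeline; it only probes the packages and the fluidsynth binary installed above:

import importlib.util, shutil

for mod in ['huggingface_hub', 'einops', 'torchsummary']:
    # find_spec returns None when the module is not importable
    print(mod, 'OK' if importlib.util.find_spec(mod) else 'MISSING')

print('fluidsynth binary:', shutil.which('fluidsynth'))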
#@title Import modules

print('=' * 70)
print('Loading core Monster Music Transformer modules...')

import os
import copy
import pickle
import secrets
import statistics
from time import time
import tqdm

print('=' * 70)
print('Loading main Monster Music Transformer modules...')
import torch

%cd /content/Monster-MIDI-Dataset

import TMIDIX

from midi_to_colab_audio import midi_to_colab_audio

from x_transformer_1_27_16 import *

import random

%cd /content/
print('=' * 70)
print('Loading aux Monster Music Transformer modules...')

import matplotlib.pyplot as plt

from torchsummary import summary
from sklearn import metrics

from IPython.display import Audio, display

from huggingface_hub import hf_hub_download

from google.colab import files

print('=' * 70)
print('Done!')
print('Enjoy! :)')
print('=' * 70)
# (LOAD MODEL)
#@title Load Monster Music Transformer Pre-Trained Model

#@markdown Choose model

select_model_to_load = "651M-32L-Fast-Large" # @param ["651M-32L-Fast-Large"]

#@markdown Model precision option

model_precision = "bfloat16" # @param ["bfloat16", "float16"]

#@markdown bfloat16 == Half precision/faster speed (if supported, otherwise the model will default to float16)

#@markdown float16 == Half precision/fast speed (default fallback)

plot_tokens_embeddings = "None" # @param ["None", "Start Times", "Durations Velocities", "Piano Pitches", "Drums Pitches", "Aux"]

print('=' * 70)
print('Loading Monster Music Transformer', select_model_to_load, 'Pre-Trained Model...')
print('Please wait...')
print('=' * 70)

full_path_to_models_dir = "/content/Monster-MIDI-Dataset/"

if select_model_to_load == '651M-32L-Fast-Large':

    model_checkpoint_file_name = 'Monster_Music_Transformer_Large_Trained_Model_22501_steps_0.3419_loss_0.9121_acc.pth'
    model_path = full_path_to_models_dir+'/'+model_checkpoint_file_name
    num_layers = 36

    if os.path.isfile(model_path):
        print('Model already exists...')

    else:
        hf_hub_download(repo_id='asigalov61/Monster-Music-Transformer',
                        filename=model_checkpoint_file_name,
                        local_dir='/content/Monster-MIDI-Dataset',
                        local_dir_use_symlinks=False)

print('=' * 70)
print('Instantiating model...')

device_type = 'cuda'

if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported():
    dtype = 'bfloat16'
else:
    dtype = 'float16'

if model_precision == 'float16':
    dtype = 'float16'

ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)

SEQ_LEN = 8192

# instantiate the model

model = TransformerWrapper(
    num_tokens = 19080,
    max_seq_len = SEQ_LEN,
    attn_layers = Decoder(dim = 1024, depth = num_layers, heads = 32, attn_flash=True)
)

model = AutoregressiveWrapper(model, ignore_index=19079)

model.cuda()
print('=' * 70)

print('Loading model checkpoint...')

model.load_state_dict(torch.load(model_path))
print('=' * 70)

model.eval()

print('Done!')
print('=' * 70)

print('Model will use', dtype, 'precision...')
print('=' * 70)

# Model stats
print('Model summary...')
summary(model)

# Plot Token Embeddings
if plot_tokens_embeddings != 'None':
    tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist()

if plot_tokens_embeddings == 'Start Times':
    tok_range = [0, 256]

elif plot_tokens_embeddings == 'Durations Velocities':
    tok_range = [256, 2304]

elif plot_tokens_embeddings == 'Piano Pitches':
    tok_range = [2304, 2304+128]

elif plot_tokens_embeddings == 'Drums Pitches':
    tok_range = [18945-128, 18945]

elif plot_tokens_embeddings == 'Aux':
    tok_range = [18945, 19079]

if plot_tokens_embeddings != 'None':

    tok_emb1 = []

    for t in tok_emb[tok_range[0]:tok_range[1]]:
        tok_emb1.append(t)

    cos_sim = metrics.pairwise_distances(
        tok_emb1, metric='cosine'
    )
    plt.figure(figsize=(7, 7))
    plt.imshow(cos_sim, cmap="inferno", interpolation="nearest")
    im_ratio = cos_sim.shape[0] / cos_sim.shape[1]
    plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)
    plt.xlabel("Position")
    plt.ylabel("Position")
    plt.tight_layout()
    plt.plot()
    plt.savefig("/content/Monster-Music-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight")
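For reference, the token ranges used by this notebook (visible in the embedding-plot ranges above and in the rendering loops below) are: 0-255 for start-time deltas, 256-2303 for combined duration/velocity, 2304-18944 for combined patch/pitch (patch 128 is drums), and 18945-19079 for auxiliary/control tokens. A minimal decoding sketch for a single token, mirroring the rendering loops (times are in 16 ms steps, as in the code; this helper is illustrative and not part of the pre-trained pipeline):

def decode_token(ss):
    if 0 <= ss < 256:
        return ('delta_time_ms', ss * 16)
    if 256 <= ss < 2304:
        dur = ((ss - 256) // 8) * 16           # duration in ms
        vel = (((ss - 256) % 8) + 1) * 15      # velocity, 8 steps of 15
        return ('dur_vel', dur, vel)
    if 2304 <= ss < 18945:
        patch = (ss - 2304) // 129             # 128 == drums
        pitch = (ss - 2304) % 129
        return ('patch_pitch', patch, pitch)
    return ('aux', ss)

print(decode_token(0), decode_token(256), decode_token(2304 + 129 * 0 + 60))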
# (GENERATE)

# (IMPROV)
#@title Standard Improv Generator

#@markdown Improv type

improv_type = "Random Freestyle" # @param ["Random Freestyle", "Freestyle without Drums", "Freestyle with Drums", "Custom"]

#@markdown Custom Improv settings

first_note_MIDI_patch_number = 0 # @param {type:"slider", min:0, max:128, step:1}
add_drums = False #@param {type:"boolean"}

#@markdown Generation settings

number_of_tokens_tp_generate = 546 # @param {type:"slider", min:30, max:8190, step:3}
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}

#@markdown Other settings

render_MIDI_to_audio = True # @param {type:"boolean"}

print('=' * 70)
print('Monster Music Transformer Standard Improv Model Generator')
print('=' * 70)

if improv_type == 'Random Freestyle':

    outy = [19077]

if improv_type == 'Freestyle without Drums':

    outy = [19077, 18946]

if improv_type == 'Freestyle with Drums':

    outy = [19077, 18947]

if improv_type == 'Custom':

    if add_drums:
        drumsp = 18947 # Yes
    else:
        drumsp = 18946 # No

    outy = [19077, drumsp, 18948+first_note_MIDI_patch_number]

print('Selected Improv sequence:')
print(outy)
print('=' * 70)

torch.cuda.empty_cache()

inp = [outy] * number_of_batches_to_generate

inp = torch.LongTensor(inp).cuda()

with ctx:
    out = model.generate(inp,
                         number_of_tokens_tp_generate,
                         temperature=temperature,
                         return_prime=True,
                         verbose=True)

out0 = out.tolist()

print('=' * 70)
print('Done!')
print('=' * 70)

torch.cuda.empty_cache()

#======================================================================

print('Rendering results...')

for i in range(number_of_batches_to_generate):

    print('=' * 70)
    print('Batch #', i)
    print('=' * 70)

    out1 = out0[i]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [-1] * 16

        channels = [0] * 16
        channels[9] = 1

        for ss in song:

            if 0 <= ss < 256:

                time += ss * 16

            if 256 <= ss < 2304:

                dur = ((ss-256) // 8) * 16
                vel = (((ss-256) % 8)+1) * 15

            if 2304 <= ss < 18945:

                patch = (ss-2304) // 129

                if patch < 128:

                    if patch not in patches:
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:
                    channel = 9

                pitch = (ss-2304) % 129

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        patches = [0 if x==-1 else x for x in patches]

        data = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                        output_signature = 'Monster Music Transformer',
                                                        output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
                                                        track_name='Project Los Angeles',
                                                        list_of_MIDI_patches=patches
                                                        )

        print('=' * 70)
        print('Displaying resulting composition...')
        print('=' * 70)

        fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)

        if render_MIDI_to_audio:
            midi_audio = midi_to_colab_audio(fname + '.mid')
            display(Audio(midi_audio, rate=16000, normalize=False))

        TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
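The improv prime is just a short list of control tokens: 19077 starts a composition, 18946/18947 mark drums as absent/present, and 18948+patch requests the first instrument. As a worked example of that same scheme (illustrative only, not an additional setting), a custom prime with drums and a Violin lead (General MIDI patch 40) would be:

outy_example = [19077, 18947, 18948 + 40]  # start, drums present, first-note patch = Violin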
# (CUSTOM MIDI)
#@title Load Seed MIDI

#@markdown Press the play button to upload your own seed MIDI or to load one of the provided sample seed MIDIs from the dropdown list below

select_seed_MIDI = "Upload your own custom MIDI" # @param ["Upload your own custom MIDI", "Monster-Music-Transformer-Piano-Seed-1", "Monster-Music-Transformer-Piano-Seed-2", "Monster-Music-Transformer-Piano-Seed-3", "Monster-Music-Transformer-Piano-Seed-4", "Monster-Music-Transformer-Piano-Seed-5", "Monster-Music-Transformer-Piano-Seed-6", "Monster-Music-Transformer-MI-Seed-1", "Monster-Music-Transformer-MI-Seed-2", "Monster-Music-Transformer-MI-Seed-3", "Monster-Music-Transformer-MI-Seed-4", "Monster-Music-Transformer-MI-Seed-5", "Monster-Music-Transformer-MI-Seed-6"]
render_MIDI_to_audio = False # @param {type:"boolean"}

print('=' * 70)
print('Monster Music Transformer Seed MIDI Loader')
print('=' * 70)

f = ''

if select_seed_MIDI != "Upload your own custom MIDI":
    print('Loading seed MIDI...')
    f = '/content/Monster-MIDI-Dataset/Seeds/'+select_seed_MIDI+'.mid'

else:
    print('Upload your own custom MIDI...')
    print('=' * 70)
    uploaded_MIDI = files.upload()
    if list(uploaded_MIDI.keys()):
        f = list(uploaded_MIDI.keys())[0]

if f != '':

    print('=' * 70)
    print('File:', f)
    print('=' * 70)

    #=======================================================
    # START PROCESSING

    # Converting MIDI to ms score with MIDI.py module
    score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read(), recalculate_channels=False)

    # INSTRUMENTS CONVERSION CYCLE
    events_matrix = []
    itrack = 1
    patches = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    while itrack < len(score):
        for event in score[itrack]:
            if event[0] == 'note' or event[0] == 'patch_change':
                events_matrix.append(event)
        itrack += 1

    events_matrix.sort(key=lambda x: x[1])

    events_matrix1 = []

    for event in events_matrix:
        if event[0] == 'patch_change':
            patches[event[2]] = event[3]

        if event[0] == 'note':
            event.extend([patches[event[3]]])

            if events_matrix1:
                if (event[1] == events_matrix1[-1][1]):
                    if ([event[3], event[4]] != events_matrix1[-1][3:5]):
                        events_matrix1.append(event)
                else:
                    events_matrix1.append(event)

            else:
                events_matrix1.append(event)

    if len(events_matrix1) > 0:
        if min([e[1] for e in events_matrix1]) >= 0 and min([e[2] for e in events_matrix1]) >= 0:

            #=======================================================
            # PRE-PROCESSING

            # checking number of instruments in a composition
            instruments_list_without_drums = list(set([y[3] for y in events_matrix1 if y[3] != 9]))
            instruments_list = list(set([y[3] for y in events_matrix1]))

            if len(events_matrix1) > 0 and len(instruments_list_without_drums) > 0:

                #======================================

                events_matrix2 = []

                # Recalculating timings
                for e in events_matrix1:

                    # Original timings
                    e[1] = int(e[1] / 16)
                    e[2] = int(e[2] / 16)

                #===================================
                # ORIGINAL COMPOSITION
                #===================================

                # Sorting by patch, pitch, then by start-time

                events_matrix1.sort(key=lambda x: x[6])
                events_matrix1.sort(key=lambda x: x[4], reverse=True)
                events_matrix1.sort(key=lambda x: x[1])

                #=======================================================
                # FINAL PROCESSING

                melody_chords = []
                melody_chords2 = []

                # Break between compositions / Intro seq

                if 9 in instruments_list:
                    drums_present = 18947 # Yes
                else:
                    drums_present = 18946 # No

                if events_matrix1[0][3] != 9:
                    pat = events_matrix1[0][6]
                else:
                    pat = 128

                melody_chords.extend([19077, drums_present, 18948+pat, 0]) # Intro seq

                #=======================================================
                # MAIN PROCESSING CYCLE
                #=======================================================

                abs_time = 0

                pbar_time = 0

                pe = events_matrix1[0]

                chords_counter = 1

                comp_chords_len = len(list(set([y[1] for y in events_matrix1])))

                for e in events_matrix1:

                    #=======================================================
                    # Timings...

                    # Clipping all values...
                    delta_time = max(0, min(255, e[1]-pe[1]))

                    # Durations and channels

                    dur = max(0, min(255, e[2]))
                    cha = max(0, min(15, e[3]))

                    # Patches
                    if cha == 9: # Drums patch will be == 128
                        pat = 128

                    else:
                        pat = e[6]

                    # Pitches

                    ptc = max(1, min(127, e[4]))

                    # Velocities

                    # Calculating octo-velocity
                    vel = max(8, min(127, e[5]))
                    velocity = round(vel / 15)-1

                    #=======================================================
                    # Outro seq

                    # if ((comp_chords_len - chords_counter) == 50) and (delta_time != 0):
                    #     out_t = 18946+delta_time
                    #     out_p = 19202+ptc
                    #     melody_chords.extend([18945, out_t, out_p]) # outro seq

                    # if delta_time != 0:
                    #     chords_counter += 1

                    #=======================================================
                    # FINAL NOTE SEQ

                    # Writing final note asynchronously

                    dur_vel = (8 * dur) + velocity
                    pat_ptc = (129 * pat) + ptc

                    if delta_time != 0:
                        melody_chords.extend([delta_time, dur_vel+256, pat_ptc+2304])
                    else:
                        melody_chords.extend([dur_vel+256, pat_ptc+2304])
                    melody_chords2.append([delta_time, dur_vel+256, pat_ptc+2304])

                    pe = e

                #=======================================================

                # melody_chords.extend([19462, 19462, 19462]) # EOS

                #=======================================================

                # TOTAL DICTIONARY SIZE 19462+1=19463
                #=======================================================

                #=======================================================

                song = melody_chords

                song_f = []

                time = 0
                dur = 0
                vel = 90
                pitch = 0
                channel = 0

                patches = [-1] * 16

                channels = [0] * 16
                channels[9] = 1

                for ss in song:

                    if 0 <= ss < 256:

                        time += ss * 16

                    if 256 <= ss < 2304:

                        dur = ((ss-256) // 8) * 16
                        vel = (((ss-256) % 8)+1) * 15

                    if 2304 <= ss < 18945:

                        patch = (ss-2304) // 129

                        if patch < 128:

                            if patch not in patches:
                                if 0 in channels:
                                    cha = channels.index(0)
                                    channels[cha] = 1
                                else:
                                    cha = 15

                                patches[cha] = patch
                                channel = patches.index(patch)
                            else:
                                channel = patches.index(patch)

                        if patch == 128:
                            channel = 9

                        pitch = (ss-2304) % 129

                        song_f.append(['note', time, dur, channel, pitch, vel, patch])

                patches = [0 if x==-1 else x for x in patches]

                detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                                          output_signature = 'Monster Music Transformer',
                                                                          output_file_name = '/content/Monster-Music-Transformer-Seed-Composition',
                                                                          track_name='Project Los Angeles',
                                                                          list_of_MIDI_patches=patches
                                                                          )

                #=======================================================

                print('=' * 70)
                print('Composition stats:')
                print('Composition has', len(melody_chords2), 'notes')
                print('Composition has', len(melody_chords), 'tokens')
                print('Composition MIDI patches:', sorted(list(set([((y-2304) // 129) for y in melody_chords if 2304 <= y < 18945]))))
                print('=' * 70)

                print('Displaying resulting composition...')
                print('=' * 70)

                fname = '/content/Monster-Music-Transformer-Seed-Composition'

                if render_MIDI_to_audio:
                    midi_audio = midi_to_colab_audio(fname + '.mid')
                    display(Audio(midi_audio, rate=16000, normalize=False))

                TMIDIX.plot_ms_SONG(song_f, plot_title=fname)

else:
    print('=' * 70)
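The seed encoder above packs each note into at most three tokens: an optional start-time delta (0-255, in 16 ms steps), a duration/velocity token dur_vel = 8*dur + velocity shifted by +256, and a patch/pitch token pat_ptc = 129*patch + ptc shifted by +2304. A small worked example of that arithmetic (the values are chosen for illustration only):

delta_time, dur, velocity, patch, pitch = 8, 32, 5, 0, 60  # illustrative values only
tokens = [delta_time, (8 * dur + velocity) + 256, (129 * patch + pitch) + 2304]
print(tokens)  # [8, 517, 2364]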
# (CONTINUATION)
#@title Standard Continuation

#@markdown Generation settings

try_to_generate_outro = False #@param {type:"boolean"}
number_of_prime_tokens = 7191 # @param {type:"slider", min:3, max:8190, step:3}
number_of_tokens_to_generate = 504 # @param {type:"slider", min:30, max:8190, step:3}
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}

#@markdown Other settings
include_prime_tokens_in_generated_output = False #@param {type:"boolean"}
allow_model_to_stop_generation_if_needed = False #@param {type:"boolean"}
render_MIDI_to_audio = True # @param {type:"boolean"}

print('=' * 70)
print('Monster Music Transformer Standard Continuation Model Generator')
print('=' * 70)

if allow_model_to_stop_generation_if_needed:
    min_stop_token = 19078
else:
    min_stop_token = None

outy = melody_chords[:number_of_prime_tokens]

if try_to_generate_outro:
    outy.extend([18945])

torch.cuda.empty_cache()

inp = [outy] * number_of_batches_to_generate

inp = torch.LongTensor(inp).cuda()

with ctx:
    out = model.generate(inp,
                         number_of_tokens_to_generate,
                         temperature=temperature,
                         return_prime=include_prime_tokens_in_generated_output,
                         eos_token=min_stop_token,
                         verbose=True)

out0 = out.tolist()

torch.cuda.empty_cache()

print('=' * 70)
print('Done!')
print('=' * 70)

#======================================================================
print('Rendering results...')

for i in range(number_of_batches_to_generate):

    print('=' * 70)
    print('Batch #', i)
    print('=' * 70)

    out1 = out0[i]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [-1] * 16

        channels = [0] * 16
        channels[9] = 1

        for ss in song:

            if 0 <= ss < 256:

                time += ss * 16

            if 256 <= ss < 2304:

                dur = ((ss-256) // 8) * 16
                vel = (((ss-256) % 8)+1) * 15

            if 2304 <= ss < 18945:

                patch = (ss-2304) // 129

                if patch < 128:

                    if patch not in patches:
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:
                    channel = 9

                pitch = (ss-2304) % 129

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        patches = [0 if x==-1 else x for x in patches]

        detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                                  output_signature = 'Monster Music Transformer',
                                                                  output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
                                                                  track_name='Project Los Angeles',
                                                                  list_of_MIDI_patches=patches
                                                                  )
        print('=' * 70)
        print('Displaying resulting composition...')
        print('=' * 70)

        fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)

        if render_MIDI_to_audio:
            midi_audio = midi_to_colab_audio(fname + '.mid')
            display(Audio(midi_audio, rate=16000, normalize=False))

        TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
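To pull the generated MIDI files out of the Colab VM, they can be downloaded with the google.colab files helper that was imported earlier. This is an optional extra step and assumes the batch file names produced above:

for i in range(number_of_batches_to_generate):
    files.download('/content/Monster-Music-Transformer-Music-Composition_' + str(i) + '.mid')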
# Congrats! You did it! :)

(Notebook metadata: Colab GPU runtime (A100, premium GPU class, high-memory machine shape), private outputs; Python 3 kernel, Python 3.9.13; nbformat 4.)
monster_music_transformer.py
ADDED
@@ -0,0 +1,809 @@
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Monster_Music_Transformer.ipynb
|
| 3 |
+
|
| 4 |
+
Automatically generated by Colaboratory.
|
| 5 |
+
|
| 6 |
+
Original file is located at
|
| 7 |
+
https://colab.research.google.com/drive/1_fs1W2cuXxiMKznQIP3wtUxSIbxt71Nk
|
| 8 |
+
|
| 9 |
+
# Monster Music Transformer (ver. 1.0)
|
| 10 |
+
|
| 11 |
+
***
|
| 12 |
+
|
| 13 |
+
Powered by tegridy-tools: https://github.com/asigalov61/tegridy-tools
|
| 14 |
+
|
| 15 |
+
***
|
| 16 |
+
|
| 17 |
+
WARNING: This complete implementation is a functioning model of the Artificial Intelligence. Please excercise great humility, care, and respect. https://www.nscai.gov/
|
| 18 |
+
|
| 19 |
+
***
|
| 20 |
+
|
| 21 |
+
#### Project Los Angeles
|
| 22 |
+
|
| 23 |
+
#### Tegridy Code 2024
|
| 24 |
+
|
| 25 |
+
***
|
| 26 |
+
|
| 27 |
+
# (GPU CHECK)
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
#@title NVIDIA GPU check
|
| 31 |
+
!nvidia-smi
|
| 32 |
+
|
| 33 |
+
"""# (SETUP ENVIRONMENT)"""
|
| 34 |
+
|
| 35 |
+
#@title Install dependencies
|
| 36 |
+
!git clone --depth 1 https://github.com/asigalov61/Monster-MIDI-Dataset
|
| 37 |
+
!pip install huggingface_hub
|
| 38 |
+
!pip install einops
|
| 39 |
+
!pip install torch-summary
|
| 40 |
+
!apt install fluidsynth #Pip does not work for some reason. Only apt works
|
| 41 |
+
|
| 42 |
+
# Commented out IPython magic to ensure Python compatibility.
|
| 43 |
+
#@title Import modules
|
| 44 |
+
|
| 45 |
+
print('=' * 70)
|
| 46 |
+
print('Loading core Monster Music Transformer modules...')
|
| 47 |
+
|
| 48 |
+
import os
|
| 49 |
+
import copy
|
| 50 |
+
import pickle
|
| 51 |
+
import secrets
|
| 52 |
+
import statistics
|
| 53 |
+
from time import time
|
| 54 |
+
import tqdm
|
| 55 |
+
|
| 56 |
+
print('=' * 70)
|
| 57 |
+
print('Loading main Monster Music Transformer modules...')
|
| 58 |
+
import torch
|
| 59 |
+
|
| 60 |
+
# %cd /content/Monster-MIDI-Dataset
|
| 61 |
+
|
| 62 |
+
import TMIDIX
|
| 63 |
+
|
| 64 |
+
from midi_to_colab_audio import midi_to_colab_audio
|
| 65 |
+
|
| 66 |
+
from x_transformer_1_27_16 import *
|
| 67 |
+
|
| 68 |
+
import random
|
| 69 |
+
|
| 70 |
+
# %cd /content/
|
| 71 |
+
print('=' * 70)
|
| 72 |
+
print('Loading aux Monster Music Transformer modules...')
|
| 73 |
+
|
| 74 |
+
import matplotlib.pyplot as plt
|
| 75 |
+
|
| 76 |
+
from torchsummary import summary
|
| 77 |
+
from sklearn import metrics
|
| 78 |
+
|
| 79 |
+
from IPython.display import Audio, display
|
| 80 |
+
|
| 81 |
+
from huggingface_hub import hf_hub_download
|
| 82 |
+
|
| 83 |
+
from google.colab import files
|
| 84 |
+
|
| 85 |
+
print('=' * 70)
|
| 86 |
+
print('Done!')
|
| 87 |
+
print('Enjoy! :)')
|
| 88 |
+
print('=' * 70)
|
| 89 |
+
|
| 90 |
+
"""# (LOAD MODEL)"""
|
| 91 |
+
|
| 92 |
+
#@title Load Monster Music Transformer Pre-Trained Model
|
| 93 |
+
|
| 94 |
+
#@markdown Choose model
|
| 95 |
+
|
| 96 |
+
select_model_to_load = "651M-32L-Fast-Large" # @param ["651M-32L-Fast-Large"]
|
| 97 |
+
|
| 98 |
+
#@markdown Model precision option
|
| 99 |
+
|
| 100 |
+
model_precision = "bfloat16" # @param ["bfloat16", "float16"]
|
| 101 |
+
|
| 102 |
+
#@markdown bfloat16 == Half precision/faster speed (if supported, otherwise the model will default to float16)
|
| 103 |
+
|
| 104 |
+
#@markdown float16 == Full precision/fast speed
|
| 105 |
+
|
| 106 |
+
plot_tokens_embeddings = "None" # @param ["None", "Start Times", "Durations Velocities", "Piano Pitches", "Drums Pitches", "Aux"]
|
| 107 |
+
|
| 108 |
+
print('=' * 70)
|
| 109 |
+
print('Loading Monster Music Transformer', select_model_to_load,'Pre-Trained Model...')
|
| 110 |
+
print('Please wait...')
|
| 111 |
+
print('=' * 70)
|
| 112 |
+
|
| 113 |
+
full_path_to_models_dir = "/content/Monster-MIDI-Dataset/"
|
| 114 |
+
|
| 115 |
+
if select_model_to_load == '651M-32L-Fast-Large':
|
| 116 |
+
|
| 117 |
+
model_checkpoint_file_name = 'Monster_Music_Transformer_Large_Trained_Model_22501_steps_0.3419_loss_0.9121_acc.pth'
|
| 118 |
+
model_path = full_path_to_models_dir+'/'+model_checkpoint_file_name
|
| 119 |
+
num_layers = 36
|
| 120 |
+
if os.path.isfile(model_path):
|
| 121 |
+
print('Model already exists...')
|
| 122 |
+
|
| 123 |
+
else:
|
| 124 |
+
hf_hub_download(repo_id='asigalov61/Monster-Music-Transformer',
|
| 125 |
+
filename=model_checkpoint_file_name,
|
| 126 |
+
local_dir='/content/Monster-MIDI-Dataset',
|
| 127 |
+
local_dir_use_symlinks=False)
|
| 128 |
+
|
| 129 |
+
print('=' * 70)
|
| 130 |
+
print('Instantiating model...')
|
| 131 |
+
|
| 132 |
+
device_type = 'cuda'
|
| 133 |
+
|
| 134 |
+
if model_precision == 'bfloat16' and torch.cuda.is_bf16_supported():
|
| 135 |
+
dtype = 'bfloat16'
|
| 136 |
+
else:
|
| 137 |
+
dtype = 'float16'
|
| 138 |
+
|
| 139 |
+
if model_precision == 'float16':
|
| 140 |
+
dtype = 'float16'
|
| 141 |
+
|
| 142 |
+
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
|
| 143 |
+
ctx = torch.amp.autocast(device_type=device_type, dtype=ptdtype)
|
| 144 |
+
|
| 145 |
+
SEQ_LEN = 8192
|
| 146 |
+
|
| 147 |
+
# instantiate the model
|
| 148 |
+
|
| 149 |
+
model = TransformerWrapper(
|
| 150 |
+
num_tokens = 19080,
|
| 151 |
+
max_seq_len = SEQ_LEN,
|
| 152 |
+
attn_layers = Decoder(dim = 1024, depth = num_layers, heads = 32, attn_flash=True)
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
model = AutoregressiveWrapper(model, ignore_index=19079)
|
| 156 |
+
|
| 157 |
+
model.cuda()
|
| 158 |
+
print('=' * 70)
|
| 159 |
+
|
| 160 |
+
print('Loading model checkpoint...')
|
| 161 |
+
|
| 162 |
+
model.load_state_dict(torch.load(model_path))
|
| 163 |
+
print('=' * 70)
|
| 164 |
+
|
| 165 |
+
model.eval()
|
| 166 |
+
|
| 167 |
+
print('Done!')
|
| 168 |
+
print('=' * 70)
|
| 169 |
+
|
| 170 |
+
print('Model will use', dtype, 'precision...')
|
| 171 |
+
print('=' * 70)
|
| 172 |
+
|
| 173 |
+
# Model stats
|
| 174 |
+
print('Model summary...')
|
| 175 |
+
summary(model)
|
| 176 |
+
|
| 177 |
+
# Plot Token Embeddings
|
| 178 |
+
if plot_tokens_embeddings != 'None':
|
| 179 |
+
tok_emb = model.net.token_emb.emb.weight.detach().cpu().tolist()
|
| 180 |
+
|
| 181 |
+
if plot_tokens_embeddings == 'Start Times':
|
| 182 |
+
tok_range = [0, 256]
|
| 183 |
+
|
| 184 |
+
elif plot_tokens_embeddings == 'Durations Velocities':
|
| 185 |
+
tok_range = [256, 2304]
|
| 186 |
+
|
| 187 |
+
elif plot_tokens_embeddings == 'Piano Pitches':
|
| 188 |
+
tok_range = [2304, 2304+128]
|
| 189 |
+
|
| 190 |
+
elif plot_tokens_embeddings == 'Drums Pitches':
|
| 191 |
+
tok_range = [18945-128, 18945]
|
| 192 |
+
|
| 193 |
+
elif plot_tokens_embeddings == 'Aux':
|
| 194 |
+
tok_range = [18945, 19079]
|
| 195 |
+
|
| 196 |
+
if plot_tokens_embeddings != 'None':
|
| 197 |
+
|
| 198 |
+
tok_emb1 = []
|
| 199 |
+
|
| 200 |
+
for t in tok_emb[tok_range[0]:tok_range[1]]:
|
| 201 |
+
tok_emb1.append(t)
|
| 202 |
+
|
| 203 |
+
cos_sim = metrics.pairwise_distances(
|
| 204 |
+
tok_emb1, metric='cosine'
|
| 205 |
+
)
|
| 206 |
+
plt.figure(figsize=(7, 7))
|
| 207 |
+
plt.imshow(cos_sim, cmap="inferno", interpolation="nearest")
|
| 208 |
+
im_ratio = cos_sim.shape[0] / cos_sim.shape[1]
|
| 209 |
+
plt.colorbar(fraction=0.046 * im_ratio, pad=0.04)
|
| 210 |
+
plt.xlabel("Position")
|
| 211 |
+
plt.ylabel("Position")
|
| 212 |
+
plt.tight_layout()
|
| 213 |
+
plt.plot()
|
| 214 |
+
plt.savefig("/content/Monster-Music-Transformer-Tokens-Embeddings-Plot.png", bbox_inches="tight")
|
| 215 |
+
|
| 216 |
+
"""# (GENERATE)
|
| 217 |
+
|
| 218 |
+
# (IMPROV)
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
#@title Standard Improv Generator
|
| 222 |
+
|
| 223 |
+
#@markdown Improv type
|
| 224 |
+
|
| 225 |
+
improv_type = "Random Freestyle" # @param ["Random Freestyle", "Freestyle without Drums", "Freestyle with Drums", "Custom"]
|
| 226 |
+
|
| 227 |
+
#@markdown Custom Improv settings
|
| 228 |
+
|
| 229 |
+
first_note_MIDI_patch_number = 0 # @param {type:"slider", min:0, max:128, step:1}
|
| 230 |
+
add_drums = False #@param {type:"boolean"}
|
| 231 |
+
|
| 232 |
+
#@markdown Generation settings
|
| 233 |
+
|
| 234 |
+
number_of_tokens_tp_generate = 546 # @param {type:"slider", min:30, max:8190, step:3}
|
| 235 |
+
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
|
| 236 |
+
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}
|
| 237 |
+
|
| 238 |
+
#@markdown Other settings
|
| 239 |
+
|
| 240 |
+
render_MIDI_to_audio = True # @param {type:"boolean"}
|
| 241 |
+
|
| 242 |
+
print('=' * 70)
|
| 243 |
+
print('Monster Music Transformer Standard Improv Model Generator')
|
| 244 |
+
print('=' * 70)
|
| 245 |
+
|
| 246 |
+
if improv_type == 'Random Freestyle':
|
| 247 |
+
|
| 248 |
+
outy = [19077]
|
| 249 |
+
|
| 250 |
+
if improv_type == 'Freestyle without Drums':
|
| 251 |
+
|
| 252 |
+
outy = [19077, 18946]
|
| 253 |
+
|
| 254 |
+
if improv_type == 'Freestyle with Drums':
|
| 255 |
+
|
| 256 |
+
outy = [19077, 18947]
|
| 257 |
+
|
| 258 |
+
if improv_type == 'Custom':
|
| 259 |
+
|
| 260 |
+
if add_drums:
|
| 261 |
+
drumsp = 18947 # Yes
|
| 262 |
+
else:
|
| 263 |
+
drumsp = 18946 # No
|
| 264 |
+
|
| 265 |
+
outy = [19077, drumsp, 18948+first_note_MIDI_patch_number]
|
| 266 |
+
|
| 267 |
+
print('Selected Improv sequence:')
|
| 268 |
+
print(outy)
|
| 269 |
+
print('=' * 70)
|
| 270 |
+
|
| 271 |
+
torch.cuda.empty_cache()
|
| 272 |
+
|
| 273 |
+
inp = [outy] * number_of_batches_to_generate
|
| 274 |
+
|
| 275 |
+
inp = torch.LongTensor(inp).cuda()
|
| 276 |
+
|
| 277 |
+
with ctx:
|
| 278 |
+
out = model.generate(inp,
|
| 279 |
+
number_of_tokens_tp_generate,
|
| 280 |
+
temperature=temperature,
|
| 281 |
+
return_prime=True,
|
| 282 |
+
verbose=True)
|
| 283 |
+
|
| 284 |
+
out0 = out.tolist()
|
| 285 |
+
|
| 286 |
+
print('=' * 70)
|
| 287 |
+
print('Done!')
|
| 288 |
+
print('=' * 70)
|
| 289 |
+
|
| 290 |
+
torch.cuda.empty_cache()
|
| 291 |
+
|
| 292 |
+
#======================================================================

print('Rendering results...')

for i in range(number_of_batches_to_generate):

    print('=' * 70)
    print('Batch #', i)
    print('=' * 70)

    out1 = out0[i]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [-1] * 16

        channels = [0] * 16
        channels[9] = 1

        for ss in song:

            if 0 <= ss < 256:

                time += ss * 16

            if 256 <= ss < 2304:

                dur = ((ss-256) // 8) * 16
                vel = (((ss-256) % 8)+1) * 15

            if 2304 <= ss < 18945:

                patch = (ss-2304) // 129

                if patch < 128:

                    if patch not in patches:
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:
                    channel = 9

                pitch = (ss-2304) % 129

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        patches = [0 if x==-1 else x for x in patches]

        data = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                        output_signature = 'Monster Music Transformer',
                                                        output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
                                                        track_name='Project Los Angeles',
                                                        list_of_MIDI_patches=patches
                                                        )

        print('=' * 70)
        print('Displaying resulting composition...')
        print('=' * 70)

        fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)

        if render_MIDI_to_audio:
            midi_audio = midi_to_colab_audio(fname + '.mid')
            display(Audio(midi_audio, rate=16000, normalize=False))

        TMIDIX.plot_ms_SONG(song_f, plot_title=fname)
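# The decoding rules above can be summarised in a small stand-alone helper
# (a sketch for reference only; the function name is illustrative, and the
# token ranges are taken directly from the loop above):
#   0..255      -> delta start-time, in 16 ms steps
#   256..2303   -> duration / velocity pair
#   2304..18944 -> patch / pitch pair (patch 128 = drums)
def describe_token(t):
    if 0 <= t < 256:
        return ('time', t * 16)
    if 256 <= t < 2304:
        return ('dur_vel', ((t - 256) // 8) * 16, (((t - 256) % 8) + 1) * 15)
    if 2304 <= t < 18945:
        return ('patch_pitch', (t - 2304) // 129, (t - 2304) % 129)
    return ('control', t)

# Examples: describe_token(517) -> ('dur_vel', 512, 90)
#           describe_token(19077) -> ('control', 19077), the intro/start token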
"""# (CUSTOM MIDI)"""
|
| 382 |
+
|
| 383 |
+
#@title Load Seed MIDI
|
| 384 |
+
|
| 385 |
+
#@markdown Press play button to to upload your own seed MIDI or to load one of the provided sample seed MIDIs from the dropdown list below
|
| 386 |
+
|
| 387 |
+
select_seed_MIDI = "Upload your own custom MIDI" # @param ["Upload your own custom MIDI", "Monster-Music-Transformer-Piano-Seed-1", "Monster-Music-Transformer-Piano-Seed-2", "Monster-Music-Transformer-Piano-Seed-3", "Monster-Music-Transformer-Piano-Seed-4", "Monster-Music-Transformer-Piano-Seed-5", "Monster-Music-Transformer-Piano-Seed-6", "Monster-Music-Transformer-MI-Seed-1", "Monster-Music-Transformer-MI-Seed-2", "Monster-Music-Transformer-MI-Seed-3", "Monster-Music-Transformer-MI-Seed-4", "Monster-Music-Transformer-MI-Seed-5", "Monster-Music-Transformer-MI-Seed-6"]
|
| 388 |
+
render_MIDI_to_audio = False # @param {type:"boolean"}
|
| 389 |
+
|
| 390 |
+
print('=' * 70)
|
| 391 |
+
print('Monster Music Transformer Seed MIDI Loader')
|
| 392 |
+
print('=' * 70)
|
| 393 |
+
|
| 394 |
+
f = ''

if select_seed_MIDI != "Upload your own custom MIDI":
    print('Loading seed MIDI...')
    f = '/content/Monster-MIDI-Dataset/Seeds/'+select_seed_MIDI+'.mid'

else:
    print('Upload your own custom MIDI...')
    print('=' * 70)
    uploaded_MIDI = files.upload()
    if list(uploaded_MIDI.keys()):
        f = list(uploaded_MIDI.keys())[0]

if f != '':

    print('=' * 70)
    print('File:', f)
    print('=' * 70)

    #=======================================================
    # START PROCESSING

    # Converting MIDI to ms score with the MIDI.py module
    score = TMIDIX.midi2single_track_ms_score(open(f, 'rb').read(), recalculate_channels=False)

    # INSTRUMENTS CONVERSION CYCLE
    events_matrix = []
    itrack = 1
    patches = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    while itrack < len(score):
        for event in score[itrack]:
            if event[0] == 'note' or event[0] == 'patch_change':
                events_matrix.append(event)
        itrack += 1

    events_matrix.sort(key=lambda x: x[1])

    events_matrix1 = []

    for event in events_matrix:
        if event[0] == 'patch_change':
            patches[event[2]] = event[3]

        if event[0] == 'note':
            event.extend([patches[event[3]]])

            if events_matrix1:
                if (event[1] == events_matrix1[-1][1]):
                    if ([event[3], event[4]] != events_matrix1[-1][3:5]):
                        events_matrix1.append(event)
                else:
                    events_matrix1.append(event)

            else:
                events_matrix1.append(event)

    if len(events_matrix1) > 0:
        if min([e[1] for e in events_matrix1]) >= 0 and min([e[2] for e in events_matrix1]) >= 0:

            #=======================================================
            # PRE-PROCESSING

            # checking number of instruments in a composition
            instruments_list_without_drums = list(set([y[3] for y in events_matrix1 if y[3] != 9]))
            instruments_list = list(set([y[3] for y in events_matrix1]))

            if len(events_matrix1) > 0 and len(instruments_list_without_drums) > 0:

                #======================================

                events_matrix2 = []

                # Recalculating timings
                for e in events_matrix1:

                    # Original timings
                    e[1] = int(e[1] / 16)
                    e[2] = int(e[2] / 16)
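                    # Worked example of the 16 ms quantization above (illustrative numbers,
                    # not from the original script): a note starting at 4000 ms with a
                    # 480 ms duration becomes start = int(4000 / 16) = 250 and
                    # duration = int(480 / 16) = 30.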
                #===================================
                # ORIGINAL COMPOSITION
                #===================================

                # Sorting by patch, pitch, then by start-time

                events_matrix1.sort(key=lambda x: x[6])
                events_matrix1.sort(key=lambda x: x[4], reverse=True)
                events_matrix1.sort(key=lambda x: x[1])
                #=======================================================
                # FINAL PROCESSING

                melody_chords = []
                melody_chords2 = []

                # Break between compositions / Intro seq

                if 9 in instruments_list:
                    drums_present = 18947 # Yes
                else:
                    drums_present = 18946 # No

                if events_matrix1[0][3] != 9:
                    pat = events_matrix1[0][6]
                else:
                    pat = 128

                melody_chords.extend([19077, drums_present, 18948+pat, 0]) # Intro seq

                #=======================================================
                # MAIN PROCESSING CYCLE
                #=======================================================

                abs_time = 0

                pbar_time = 0

                pe = events_matrix1[0]

                chords_counter = 1

                comp_chords_len = len(list(set([y[1] for y in events_matrix1])))

                for e in events_matrix1:

                    #=======================================================
                    # Timings...

                    # Clipping all values...
                    delta_time = max(0, min(255, e[1]-pe[1]))

                    # Durations and channels

                    dur = max(0, min(255, e[2]))
                    cha = max(0, min(15, e[3]))

                    # Patches
                    if cha == 9: # Drums patch will be == 128
                        pat = 128

                    else:
                        pat = e[6]

                    # Pitches

                    ptc = max(1, min(127, e[4]))

                    # Velocities

                    # Calculating octo-velocity
                    vel = max(8, min(127, e[5]))
                    velocity = round(vel / 15)-1

                    #=======================================================
                    # Outro seq

                    # if ((comp_chords_len - chords_counter) == 50) and (delta_time != 0):
                    #     out_t = 18946+delta_time
                    #     out_p = 19202+ptc
                    #     melody_chords.extend([18945, out_t, out_p]) # outro seq

                    # if delta_time != 0:
                    #     chords_counter += 1

                    #=======================================================
                    # FINAL NOTE SEQ

                    # Writing final note asynchronously

                    dur_vel = (8 * dur) + velocity
                    pat_ptc = (129 * pat) + ptc

                    if delta_time != 0:
                        melody_chords.extend([delta_time, dur_vel+256, pat_ptc+2304])
                    else:
                        melody_chords.extend([dur_vel+256, pat_ptc+2304])
                    melody_chords2.append([delta_time, dur_vel+256, pat_ptc+2304])

                    pe = e
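                # Worked example of the note encoding above (illustrative numbers):
                # dur = 32 (16 ms steps), velocity bucket = 5, patch = 0 (piano), pitch = 60:
                #   dur_vel = (8 * 32) + 5   = 261 -> token 261 + 256 = 517
                #   pat_ptc = (129 * 0) + 60 = 60  -> token 60 + 2304 = 2364
                # so a note with a non-zero delta-time d is written as [d, 517, 2364],
                # and as [517, 2364] when it starts together with the previous note.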
                #=======================================================

                # melody_chords.extend([19462, 19462, 19462]) # EOS

                #=======================================================

                # TOTAL DICTIONARY SIZE 19462+1=19463
                #=======================================================

                #=======================================================

                song = melody_chords

                song_f = []

                time = 0
                dur = 0
                vel = 90
                pitch = 0
                channel = 0

                patches = [-1] * 16

                channels = [0] * 16
                channels[9] = 1

                for ss in song:

                    if 0 <= ss < 256:

                        time += ss * 16

                    if 256 <= ss < 2304:

                        dur = ((ss-256) // 8) * 16
                        vel = (((ss-256) % 8)+1) * 15

                    if 2304 <= ss < 18945:

                        patch = (ss-2304) // 129

                        if patch < 128:

                            if patch not in patches:
                                if 0 in channels:
                                    cha = channels.index(0)
                                    channels[cha] = 1
                                else:
                                    cha = 15

                                patches[cha] = patch
                                channel = patches.index(patch)
                            else:
                                channel = patches.index(patch)

                        if patch == 128:
                            channel = 9

                        pitch = (ss-2304) % 129

                        song_f.append(['note', time, dur, channel, pitch, vel, patch])

                patches = [0 if x==-1 else x for x in patches]

                detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                                          output_signature = 'Monster Music Transformer',
                                                                          output_file_name = '/content/Monster-Music-Transformer-Seed-Composition',
                                                                          track_name='Project Los Angeles',
                                                                          list_of_MIDI_patches=patches
                                                                          )

                #=======================================================

                print('=' * 70)
                print('Composition stats:')
                print('Composition has', len(melody_chords2), 'notes')
                print('Composition has', len(melody_chords), 'tokens')
                print('Composition MIDI patches:', sorted(list(set([((y-2304) // 129) for y in melody_chords if 2304 <= y < 18945]))))
                print('=' * 70)
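                # The patch numbers printed above follow General MIDI program numbering
                # (0-127), with 128 reserved by this notebook for drums. A few reference
                # points (partial, illustrative): 0 = Acoustic Grand Piano,
                # 24 = Acoustic Guitar (nylon), 40 = Violin, 56 = Trumpet.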
                print('Displaying resulting composition...')
                print('=' * 70)

                fname = '/content/Monster-Music-Transformer-Seed-Composition'

                if render_MIDI_to_audio:
                    midi_audio = midi_to_colab_audio(fname + '.mid')
                    display(Audio(midi_audio, rate=16000, normalize=False))

                TMIDIX.plot_ms_SONG(song_f, plot_title=fname)

else:
    print('=' * 70)
"""# (CONTINUATION)"""
|
| 671 |
+
|
| 672 |
+
#@title Standard Continuation
|
| 673 |
+
|
| 674 |
+
#@markdown Generation settings
|
| 675 |
+
|
| 676 |
+
try_to_generate_outro = False #@param {type:"boolean"}
|
| 677 |
+
number_of_prime_tokens = 7191 # @param {type:"slider", min:3, max:8190, step:3}
|
| 678 |
+
number_of_tokens_to_generate = 504 # @param {type:"slider", min:30, max:8190, step:3}
|
| 679 |
+
number_of_batches_to_generate = 4 #@param {type:"slider", min:1, max:16, step:1}
|
| 680 |
+
temperature = 0.9 # @param {type:"slider", min:0.1, max:1, step:0.05}
|
| 681 |
+
|
| 682 |
+
#@markdown Other settings
|
| 683 |
+
include_prime_tokens_in_generated_output = False #@param {type:"boolean"}
|
| 684 |
+
allow_model_to_stop_generation_if_needed = False #@param {type:"boolean"}
|
| 685 |
+
render_MIDI_to_audio = True # @param {type:"boolean"}
|
| 686 |
+
|
| 687 |
+
print('=' * 70)
|
| 688 |
+
print('Monster Music Transformer Standard Continuation Model Generator')
|
| 689 |
+
print('=' * 70)
|
| 690 |
+
|
| 691 |
+
if allow_model_to_stop_generation_if_needed:
|
| 692 |
+
min_stop_token = 19078
|
| 693 |
+
else:
|
| 694 |
+
min_stop_token = None
|
| 695 |
+
|
| 696 |
+
outy = melody_chords[:number_of_prime_tokens]

if try_to_generate_outro:
    outy.extend([18945])

torch.cuda.empty_cache()

inp = [outy] * number_of_batches_to_generate

inp = torch.LongTensor(inp).cuda()

with ctx:
    out = model.generate(inp,
                         number_of_tokens_to_generate,
                         temperature=temperature,
                         return_prime=include_prime_tokens_in_generated_output,
                         eos_token=min_stop_token,
                         verbose=True)

out0 = out.tolist()

torch.cuda.empty_cache()

print('=' * 70)
print('Done!')
print('=' * 70)
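# Optional (a sketch, not part of the original cell): keep the raw generated
# token lists around so the compositions can be re-rendered later without
# re-generating. The output path below is illustrative.
import pickle

with open('/content/Monster-Music-Transformer-Continuation-Tokens.pkl', 'wb') as pfile:
    pickle.dump(out0, pfile)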
#======================================================================
print('Rendering results...')

for i in range(number_of_batches_to_generate):

    print('=' * 70)
    print('Batch #', i)
    print('=' * 70)

    out1 = out0[i]

    print('Sample INTs', out1[:12])
    print('=' * 70)

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [-1] * 16

        channels = [0] * 16
        channels[9] = 1

        for ss in song:

            if 0 <= ss < 256:

                time += ss * 16

            if 256 <= ss < 2304:

                dur = ((ss-256) // 8) * 16
                vel = (((ss-256) % 8)+1) * 15

            if 2304 <= ss < 18945:

                patch = (ss-2304) // 129

                if patch < 128:

                    if patch not in patches:
                        if 0 in channels:
                            cha = channels.index(0)
                            channels[cha] = 1
                        else:
                            cha = 15

                        patches[cha] = patch
                        channel = patches.index(patch)
                    else:
                        channel = patches.index(patch)

                if patch == 128:
                    channel = 9

                pitch = (ss-2304) % 129

                song_f.append(['note', time, dur, channel, pitch, vel, patch])

        patches = [0 if x==-1 else x for x in patches]

        detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                                  output_signature = 'Monster Music Transformer',
                                                                  output_file_name = '/content/Monster-Music-Transformer-Music-Composition_'+str(i),
                                                                  track_name='Project Los Angeles',
                                                                  list_of_MIDI_patches=patches
                                                                  )
        print('=' * 70)
        print('Displaying resulting composition...')
        print('=' * 70)

        fname = '/content/Monster-Music-Transformer-Music-Composition_'+str(i)

        if render_MIDI_to_audio:
            midi_audio = midi_to_colab_audio(fname + '.mid')
            display(Audio(midi_audio, rate=16000, normalize=False))

        TMIDIX.plot_ms_SONG(song_f, plot_title=fname)

"""# Congrats! You did it! :)"""