Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
Michael Ganss committed May 8, 2019
0 parents commit e5480d0
Show file tree
Hide file tree
Showing 9 changed files with 24,667 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
/.vscode
Binary file added DrumGen.amxd
Binary file not shown.
41 changes: 41 additions & 0 deletions README.md
@@ -0,0 +1,41 @@
# DrumGen

A Max for Live 808/909-style drum pattern generator and sequencer based on a [Lempel-Ziv](https://en.wikipedia.org/wiki/LZ77_and_LZ78) model. The device has a built-in model that was generated from ~250 existing drum patterns using the incremental parsing algorithm described in [Guessing the Composer's Mind: Applying Universal Prediction to Musical Style](http://articles.ircam.fr/textes/Assayag99a/index.pdf).

## Features

* Generate patterns specific to 11 different instruments
* Create MIDI clips
* Continue mode: continues an existing pattern
* Patterns have low/high velocity
* Additional total accent (configurable amount)
* Select pitch and low/high velocity for each instrument
* Shuffle
* Pattern length between 1 and 32 steps
* Select "House", "Breaks", or "All" style patterns (see below)
* 909-like flam (configurable amount)
* Presets

## Total accent

The total accent feature works similarly to the 808/909 accent (AC). When it's enabled, the selected amount of velocity is added to some steps (depending on the generated AC pattern) for a maximum of 127. For example, if the high velocity for an instrument is 96 and it gets an accent of 40, the velocity will be 127.

## Flam

Flam is similar to 909 flam, i.e. steps for which the generated pattern has a flam will have two short successive hits. The duration of an individual hit is 10ms and the two hits occur at the selected interval. The default is 20ms, approx. like setting 11 or 12 on a 909 (see [here](http://www.e-licktronic.com/forum/viewtopic.php?f=25&t=1430) for measured timings on a real 909). In generated MIDI clips the duration is a bit longer; this seems to be a limitation of the Max for Live API.

## Styles

You can select between "House", "Breaks", or "All" style patterns. If you choose "House", the model is one where the existing drum patterns had bass drum hits on steps 1, 5, 9, and 13. The "Breaks" model was generated from patterns which did not follow this pattern.

## Create clip

If you click <kbd>Clip</kbd> a new clip with the current pattern is generated in the first empty clip slot of the current track. You might notice subtle timing differences when comparing recorded MIDI vs. created MIDI clips. This is a limitation of Max for Live.

## Continue

If the <kbd>Continue</kbd> toggle is activated, DrumGen will read MIDI notes from the first non-empty clip slot of the current track and continue the pattern. Notes are internally quantized to 16th notes (duration is irrelevant). Velocities that are equal to or higher than the selected high velocity are considered high, all others low. If more than one hit occurs in a 16th note interval, the step is considered a flam. If a clip is longer than two bars, only the last 32 steps are considered.

## Context

When a step is generated, the previous steps that are used to look up the possible next steps from the continuations dictionary are called the context. The maximum number of steps in the context can be selected within a range of 1 to 32. Lower numbers will result in more random output.
260 changes: 260 additions & 0 deletions drumgen.js
@@ -0,0 +1,260 @@
// Max for Live jsobject setup: one inlet for control messages, one outlet
// for generated patterns; autowatch reloads the script when the file changes.
inlets = 1;
outlets = 1;
autowatch = 1;

// Loads the pre-built Lempel-Ziv continuation model (expected to define "model").
include("model.js");

var temperature = 1.0;   // NOTE(review): declared but never used in this file
var patternLength = 32;  // number of 16th-note steps generated per pattern
var maxContext = 32;     // max context length (characters) for model lookup
var numInstruments = 12; // presumably 11 instruments + the accent row -- TODO confirm
var style = "house";     // active model style key: "house", "breaks", or "all"
var clipLength = 32;     // clip length in 16th steps used by createClip()

// Handler for a bang from the patcher: regenerate the patterns of all instruments.
function bang() {
    generateAll();
}

// Handler for the "setStyle" message: selects which pattern model subsequent
// generation uses. Stored lower-cased so UI labels like "House" match model keys.
function setStyle(selected) {
    var normalized = selected.toLowerCase();
    style = normalized;
}

// Handler for the "setLength" message: sets the clip length (in 16th steps)
// used when creating MIDI clips.
function setLength(steps) {
    clipLength = steps;
}

// Handler for the "setContext" message: sets the maximum number of previous
// characters used as lookup context during generation (lower = more random).
function setContext(contextLength) {
    maxContext = contextLength;
}

// Regenerate the pattern for every instrument slot. Dumps preset storage once
// up front and tells each generate() call not to dump again.
function generateAll() {
    callPresetStorageDump();
    for (var slot = 0; slot < numInstruments; slot++) {
        generate(slot, false);
    }
}

// Generate a pattern for instrument slot i and send it out outlet 0 as
// (i, "pitch", 1, pattern). dumpStorage: when undefined or truthy, refresh
// the preset storage snapshot first (generateAll passes false because it
// already dumped once for all instruments).
function generate(i, dumpStorage) {
    if (dumpStorage === undefined || dumpStorage)
        callPresetStorageDump();

    // Continuation dictionary of the LZ model for the active style/instrument.
    var dict = model[style][i];
    // Seed text: "^" for a fresh pattern, or an encoded clip in continue mode.
    var text = getTextToContinue(i);
    var pattern = [];

    while (pattern.length < patternLength) {
        // The model text carries a "-" separator after every 4 step symbols
        // (beat boundary); insert it before sampling the next step.
        if (((text.length - 1) % 5) === 4) {
            text = text + "-";
            continue;
        }

        // Use at most maxContext trailing characters as context, backing off
        // (dropping the oldest character) until the dictionary has an entry.
        var context = text.slice(-maxContext);

        while (!dict.hasOwnProperty(context)) {
            context = context.slice(1);
        }

        // Sample the next step symbol from the context's probability distribution.
        var dist = dict[context];
        var x = Math.random();

        for (var k in dist) {
            if (dist.hasOwnProperty(k)) {
                var p = dist[k];
                x = x - p;
                if (x <= 0.0) {
                    text = text + k;
                    // Radix 10: step symbols are decimal digits ("0".."5").
                    pattern.push(parseInt(k, 10));
                    break;
                }
            }
        }
    }

    outlet(0, i, "pitch", 1, pattern);
}

// Snapshot of the device's preset values, filled by dumpPreset() after
// callPresetStorageDump(); keys look like "swing", "cont", "bd::pitch".
var storage = {};

// Return the seed text used to start generation for instrument slot i.
// In continue mode (storage "cont" === 1) and for i > 0 (slot 0 is not in the
// instruments list -- presumably the accent row, which always starts fresh),
// encode the first non-empty clip on this device's track as model text.
// Otherwise return "^", the start-of-pattern symbol.
function getTextToContinue(i) {
    var continueMode = i > 0 && storage["cont"] === 1;

    if (continueMode) {
        var track = new LiveAPI("this_device canonical_parent");
        var slotCount = track.getcount("clip_slots");

        for (var slot = 0; slot < slotCount; slot++) {
            var clipSlot = new LiveAPI("this_device canonical_parent clip_slots " + slot);
            if (clipSlot.get("has_clip").toString() === "0") continue;

            var clip = new LiveAPI("this_device canonical_parent clip_slots " + slot + " clip");
            var clipNotes = getClipNotes(clip);
            var lengthInBeats = clip.get("length");
            var texts = getTextFromMidi(clipNotes, lengthInBeats);
            return texts[i - 1];
        }
    }

    return "^";
}

// Create a new MIDI clip with the current pattern in the first empty clip
// slot of this device's track, creating a new scene first if every slot is full.
function createClip() {
    var track = new LiveAPI("this_device canonical_parent");
    var clipSlots = track.getcount("clip_slots");
    var clipSlot;

    // NOTE(review): firstClip is declared but never used in this function.
    var firstClip = null;

    // Find the first empty slot. clipSlot and clipSlotNum deliberately keep
    // their values after the loop; clipSlotNum === clipSlots means no empty slot.
    for (var clipSlotNum = 0; clipSlotNum < clipSlots; clipSlotNum++) {
        clipSlot = new LiveAPI("this_device canonical_parent clip_slots " + clipSlotNum);
        var hasClip = clipSlot.get("has_clip").toString() !== "0";
        if (!hasClip) break;
    }

    if (clipSlotNum === clipSlots) {
        // have to create new clip slot (scene)
        var set = new LiveAPI("live_set");
        set.call("create_scene", -1);
        clipSlot = new LiveAPI("this_device canonical_parent clip_slots " + clipSlotNum);
    }

    // clipLength is in 16th steps; create_clip takes a length in beats.
    var beats = Math.ceil(clipLength / 4);
    clipSlot.call("create_clip", beats);
    var clip = new LiveAPI("this_device canonical_parent clip_slots " + clipSlotNum + " clip");
    var notes = generateMidi();

    setNotes(clip, notes);
}

// Plain record for one MIDI note. Fields mirror the Live API "note" call
// order (pitch, start, duration, velocity, muted); Start and Duration are in beats.
function Note(pitch, start, duration, velocity, muted) {
    this.Pitch = pitch;
    this.Start = start;
    this.Duration = duration;
    this.Velocity = velocity;
    this.Muted = muted;
}

// Write notes into a Live clip using the set_notes / notes / note / done
// call protocol. Start and duration are formatted to 4 decimal places.
function setNotes(clip, notes) {
    clip.call("set_notes");
    clip.call("notes", notes.length);

    for (var n = 0; n < notes.length; n++) {
        var current = notes[n];
        clip.call(
            "note",
            current.Pitch,
            current.Start.toFixed(4),
            current.Duration.toFixed(4),
            current.Velocity,
            current.Muted
        );
    }

    clip.call("done");
}

// Ask the patcher object named "presetStorage" to dump its current values;
// the dump arrives back via dumpPreset() and fills the storage snapshot.
function callPresetStorageDump() {
    this.patcher.getnamed("presetStorage").message("dump");
}

// Instrument short names used as storage key prefixes (presumably 808/909-style
// abbreviations: bass drum, snare, toms, rimshot, clap, cowbell, cymbal, hats).
var instruments = ["bd", "sd", "lt", "mt", "ht", "rs", "cp", "cb", "cy", "oh", "ch"];

// Build the Note list for a MIDI clip from the current patterns in preset
// storage. Applies per-instrument toggle/pitch/velocity settings plus swing,
// flam, and total-accent parameters. Returns Note objects with start and
// duration in beats.
function generateMidi() {
    callPresetStorageDump();

    var swing = parseInt(storage["swing"], 10);          // 50 = straight timing
    var flam = storage["flam"] === 1;
    var flamAmount = parseInt(storage["flamAmount"], 10); // flam interval in ms
    var accent = storage["accent"] === 1;
    var accentVel = parseInt(storage["accentVel"], 10);   // velocity added by accent
    // Live reports tempo in BPM; convert to beats-per-millisecond so the
    // millisecond-based flam durations can be expressed in beats.
    var tempo = parseFloat(new LiveAPI("live_set").getstring("tempo"));
    var beatsPerMs = tempo / (60.0 * 1000.0);
    var notes = [];

    for (var i = 0; i < instruments.length; i++) {
        var instrument = instruments[i];
        var toggle = storage[instrument + "::toggle"] === 1;
        if (!toggle) continue; // instrument muted in the UI
        var pitch = parseInt(storage[instrument + "::pitch"], 10);
        var velLow = parseInt(storage[instrument + "::velLow"], 10);
        var velHigh = parseInt(storage[instrument + "::velHigh"], 10);
        for (var s = 0; s < clipLength; s++) {
            // Step values sit every 5 elements starting at offset 11 of the
            // dumped step message -- matches the preset dump layout (TODO confirm).
            var idx = 11 + s * 5;
            var step = parseInt(storage[instrument + "::step"][idx], 10);
            if (step === 0) continue; // 0 = no hit on this step
            var accentStep = parseInt(storage["ac::step"][idx], 10);
            // Swing: delay every second 16th by up to a 32nd (swing 50 = none).
            var delay = (s % 2) * (1.0 / 8.0) * (swing - 50.0) / 25.0;
            var start = (s / 4.0) + delay; // 16th steps -> beats
            var acVel = accent && accentStep > 0 ? accentVel : 0;
            var velLo = Math.min(127, velLow + acVel); // clamp to MIDI max
            var velHi = Math.min(127, velHigh + acVel);
            if (flam && step === 5) {
                // Flam step (symbol 5): two short 10 ms hits, the second
                // flamAmount ms after the first.
                var note1 = new Note(pitch, start, 10.0 * beatsPerMs, velLo, 0);
                var note2 = new Note(pitch, start + flamAmount * beatsPerMs, 10.0 * beatsPerMs, velHi, 0);
                notes.push(note1, note2);
            } else {
                // Normal step: 32nd-note duration; symbol 2 = high velocity, 1 = low.
                var note = new Note(pitch, start, 1.0 / 8.0, step > 1 ? velHi : velLo, 0);
                notes.push(note);
            }
        }
    }

    return notes;
}

// Receives one dumped preset value from the patcher's preset storage.
// The first argument is the value's key; the remaining arguments are the
// value itself. A single value is stored bare, multiple values as an array.
function dumpPreset(path) {
    var values = Array.prototype.slice.call(arguments, 1);
    if (values.length > 1) {
        storage[path] = values;
    } else {
        storage[path] = values[0];
    }
}

// Read all notes from a Live clip as Note objects. get_notes returns a flat
// list: "notes", count, then per note "note", pitch, start, duration,
// velocity, muted, terminated by "done".
function getClipNotes(clip) {
    var clipLen = clip.get("length");
    var data = clip.call("get_notes", 0, 0, clipLen, 128);
    var result = [];

    // Start at the first "note" token (index 2); stop before the trailing "done".
    for (var pos = 2; pos < (data.length - 1); pos += 6) {
        result.push(new Note(data[pos + 1], data[pos + 2], data[pos + 3], data[pos + 4], data[pos + 5]));
    }

    return result;
}

// Encode Live clip notes as per-instrument model text strings.
// notes: Note objects (Start in beats); clipLength: clip length in beats.
// Returns one string per instrument: "^" then one symbol per 16th step
// ("0" rest, "1" low hit, "2" high hit, "5" flam i.e. more than one hit in a
// step), with a "-" separator after every 4 steps -- the model's text format.
function getTextFromMidi(notes, clipLength) {
    // Map each instrument's configured pitch to [instrument index, velLow, velHigh].
    var instrumentValues = [];
    var steps = [];

    for (var i = 0; i < instruments.length; i++) {
        var instrument = instruments[i];
        var pitch = parseInt(storage[instrument + "::pitch"], 10);
        var velLow = parseInt(storage[instrument + "::velLow"], 10);
        var velHigh = parseInt(storage[instrument + "::velHigh"], 10);
        instrumentValues[pitch] = [i, velLow, velHigh];
        steps[i] = [];
    }

    for (i = 0; i < notes.length; i++) {
        var note = notes[i];
        if (note.Muted === 1) continue;
        var vals = instrumentValues[note.Pitch];

        // Notes whose pitch matches no configured instrument are ignored.
        if (vals) {
            // Quantize the note start to a 16th step (4 steps per beat).
            var step = Math.floor(note.Start * 4.0);
            var idx = vals[0];
            var hi = vals[2];
            // At or above the configured high velocity counts as a high hit.
            var val = note.Velocity >= hi ? "2" : "1";

            // A second hit landing on an occupied step marks it as a flam.
            steps[idx][step] = steps[idx][step] ? "5" : val;
        }
    }

    var texts = [];

    for (i = 0; i < instruments.length; i++) {
        var text = "^";
        for (var s = 0; s < (clipLength * 4); s++) {
            if (s > 0 && (s % 4) === 0) text += "-";
            text += steps[i][s] || "0";
        }
        texts[i] = text;
    }

    return texts;
}

0 comments on commit e5480d0

Please sign in to comment.