Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

whisper : support speaker segmentation (local diarization) of mono audio via tinydiarize #1058

Merged
merged 17 commits into from Jul 4, 2023
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 4 additions & 0 deletions Makefile
Expand Up @@ -302,12 +302,16 @@ samples:
@wget --quiet --show-progress -O samples/gb1.ogg https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg
@wget --quiet --show-progress -O samples/hp0.ogg https://upload.wikimedia.org/wikipedia/en/d/d4/En.henryfphillips.ogg
@wget --quiet --show-progress -O samples/mm1.wav https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav
@wget --quiet --show-progress -O samples/a13.mp3 https://upload.wikimedia.org/wikipedia/commons/transcoded/6/6f/Apollo13-wehaveaproblem.ogg/Apollo13-wehaveaproblem.ogg.mp3
@echo "Converting to 16-bit WAV ..."
@ffmpeg -loglevel -0 -y -i samples/gb0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb0.wav
@ffmpeg -loglevel -0 -y -i samples/gb1.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb1.wav
@ffmpeg -loglevel -0 -y -i samples/hp0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/hp0.wav
@rm samples/*.ogg
@ffmpeg -loglevel -0 -y -i samples/mm1.wav -ar 16000 -ac 1 -c:a pcm_s16le samples/mm0.wav
@rm samples/mm1.wav
@ffmpeg -loglevel -0 -y -i samples/a13.mp3 -ar 16000 -ac 1 -c:a pcm_s16le -ss 00:00:00 -to 00:00:30 samples/a13.wav
@rm samples/a13.mp3

#
# Models
Expand Down
8 changes: 4 additions & 4 deletions bindings/go/whisper.go
Expand Up @@ -270,13 +270,13 @@ func (ctx *Context) Whisper_token_lang(lang_id int) Token {
}

// Task tokens
func Whisper_token_translate() Token {
return Token(C.whisper_token_translate())
func (ctx *Context) Whisper_token_translate() Token {
return Token(C.whisper_token_translate((*C.struct_whisper_context)(ctx)))
}

// Task tokens
func Whisper_token_transcribe() Token {
return Token(C.whisper_token_transcribe())
func (ctx *Context) Whisper_token_transcribe() Token {
return Token(C.whisper_token_transcribe((*C.struct_whisper_context)(ctx)))
}

// Performance information
Expand Down
Expand Up @@ -224,8 +224,8 @@ public interface WhisperCppJnaLibrary extends Library {
int whisper_token_lang(Pointer ctx, int lang_id);

// Task tokens
int whisper_token_translate();
int whisper_token_transcribe();
int whisper_token_translate (Pointer ctx);
int whisper_token_transcribe(Pointer ctx);

// Performance information from the default state.
void whisper_print_timings(Pointer ctx);
Expand Down
7 changes: 5 additions & 2 deletions examples/main/main.cpp
Expand Up @@ -279,7 +279,7 @@ void whisper_print_segment_callback(struct whisper_context * ctx, struct whisper
for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
if (params.print_special == false) {
const whisper_token id = whisper_full_get_token_id(ctx, i, j);
if (id >= whisper_token_eot(ctx)) {
if (id >= whisper_token_eot(ctx) && id != whisper_token_solm(ctx)) { // TODO@Akash - make configurable?
continue;
}
}
Expand Down Expand Up @@ -566,6 +566,7 @@ bool output_json(struct whisper_context * ctx, const char * fname, const whisper
const char * text = whisper_full_get_segment_text(ctx, i);
const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
const bool speaker_turn_next = whisper_full_get_segment_speaker_turn_next(ctx, i);

start_obj(nullptr);
start_obj("timestamps");
Expand All @@ -576,11 +577,13 @@ bool output_json(struct whisper_context * ctx, const char * fname, const whisper
value_i("from", t0 * 10, false);
value_i("to", t1 * 10, true);
end_obj(false);
value_s("text", text, !params.diarize);
value_s("text", text, !params.diarize); // TODO@Akash - make configurable with flag

if (params.diarize && pcmf32s.size() == 2) {
value_s("speaker", estimate_diarization_speaker(pcmf32s, t0, t1, true).c_str(), true);
}
// TODO@Akash - make configurable with flag
value_b("speaker_turn_next", speaker_turn_next, true);
end_obj(i == (n_segments - 1));
}

Expand Down
8 changes: 7 additions & 1 deletion models/download-ggml-model.sh
Expand Up @@ -22,7 +22,7 @@ function get_script_path() {
models_path="$(get_script_path)"

# Whisper models
models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small" "medium.en" "medium" "large-v1" "large" )
models=( "tiny.en" "tiny" "base.en" "base" "small.en" "small.en-tdrz" "small" "medium.en" "medium" "large-v1" "large" )

# list available models
function list_models {
Expand Down Expand Up @@ -50,6 +50,12 @@ if [[ ! " ${models[@]} " =~ " ${model} " ]]; then
exit 1
fi

# check if model contains `tdrz` and update the src and pfx accordingly
if [[ $model == *"tdrz"* ]]; then
src="https://huggingface.co/akashmjn/tinydiarize-whisper.cpp"
pfx="resolve/main/ggml"
fi

# download ggml model

printf "Downloading ggml model $model from '$src' ...\n"
Expand Down
62 changes: 40 additions & 22 deletions whisper.cpp
Expand Up @@ -380,16 +380,17 @@ struct whisper_vocab {
std::map<token, id> token_to_id;
std::map<id, token> id_to_token;

id token_eot = 50256;
id token_sot = 50257;
id token_prev = 50360;
id token_solm = 50361; // ??
id token_not = 50362; // no timestamps
id token_beg = 50363;

// available tasks
static const id token_translate = 50358;
static const id token_transcribe = 50359;
// reference: https://github.com/openai/whisper/blob/248b6cb124225dd263bb9bd32d060b6517e067f8/whisper/tokenizer.py#L334-L349
id token_eot = 50256;
id token_sot = 50257;
// task tokens (used only for multilingual models)
id token_translate = 50357;
id token_transcribe = 50358;
// other special tokens
id token_solm = 50359; // ?? TODO@Akash - rename appropriately
id token_prev = 50360;
id token_not = 50362; // no timestamps
id token_beg = 50363; // begin timestamps

bool is_multilingual() const {
return n_vocab == 51865;
Expand All @@ -403,6 +404,8 @@ struct whisper_segment {
std::string text;

std::vector<whisper_token_data> tokens;

bool speaker_turn_next;
};

// medium
Expand Down Expand Up @@ -966,8 +969,10 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
if (vocab.is_multilingual()) {
vocab.token_eot++;
vocab.token_sot++;
vocab.token_prev++;
vocab.token_translate++;
vocab.token_transcribe++;
vocab.token_solm++;
vocab.token_prev++;
vocab.token_not++;
vocab.token_beg++;
}
Expand All @@ -981,6 +986,8 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
word = "[_EOT_]";
} else if (i == vocab.token_sot) {
word = "[_SOT_]";
} else if (i == vocab.token_solm) { // TODO@Akash make this configurable
word = " [SPEAKER TURN]";
} else if (i == vocab.token_prev) {
word = "[_PREV_]";
} else if (i == vocab.token_not) {
Expand Down Expand Up @@ -3228,12 +3235,12 @@ whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id) {
return whisper_token_sot(ctx) + 1 + lang_id;
}

whisper_token whisper_token_translate(void) {
return whisper_vocab::token_translate;
whisper_token whisper_token_translate(struct whisper_context * ctx) {
return ctx->vocab.token_translate;
}

whisper_token whisper_token_transcribe(void) {
return whisper_vocab::token_transcribe;
whisper_token whisper_token_transcribe(struct whisper_context * ctx) {
return ctx->vocab.token_transcribe;
}

void whisper_print_timings(struct whisper_context * ctx) {
Expand Down Expand Up @@ -3521,7 +3528,7 @@ static void whisper_process_logits(

// suppress sot and solm tokens
logits[vocab.token_sot] = -INFINITY;
logits[vocab.token_solm] = -INFINITY;
// logits[vocab.token_solm] = -INFINITY;
akashmjn marked this conversation as resolved.
Show resolved Hide resolved

// suppress task tokens
logits[vocab.token_translate] = -INFINITY;
Expand Down Expand Up @@ -4018,9 +4025,9 @@ int whisper_full_with_state(
state->lang_id = lang_id;
prompt_init.push_back(whisper_token_lang(ctx, lang_id));
if (params.translate) {
prompt_init.push_back(whisper_token_translate());
prompt_init.push_back(whisper_token_translate(ctx));
} else {
prompt_init.push_back(whisper_token_transcribe());
prompt_init.push_back(whisper_token_transcribe(ctx));
}
}

Expand Down Expand Up @@ -4500,23 +4507,29 @@ int whisper_full_with_state(
prompt_past.push_back(tokens_cur[i].id);
}

// store the text from this iteration
if (!tokens_cur.empty() && ctx->model.n_loaded > 0) {
int i0 = 0;
auto t0 = seek + 2*(tokens_cur.front().tid - whisper_token_beg(ctx));

std::string text;
bool speaker_turn_next;

for (int i = 0; i < (int) tokens_cur.size(); i++) {
//printf("%s: %18s %6.3f %18s %6.3f\n", __func__,
// ctx->vocab.id_to_token[tokens_cur[i].id].c_str(), tokens_cur[i].p,
// ctx->vocab.id_to_token[tokens_cur[i].tid].c_str(), tokens_cur[i].pt);

if (params.print_special == false && tokens_cur[i].id >= whisper_token_eot(ctx)) {
if (params.print_special == false && tokens_cur[i].id >= whisper_token_eot(ctx) &&
tokens_cur[i].id != whisper_token_solm(ctx)) { // TODO@Akash - make configurable with flag (may not want it in text)
} else {
text += whisper_token_to_str(ctx, tokens_cur[i].id);
}

// record if speaker turn was predicted after current segment
if (tokens_cur[i].id == whisper_token_solm(ctx)){
ggerganov marked this conversation as resolved.
Show resolved Hide resolved
speaker_turn_next = true;
}

if (tokens_cur[i].id > whisper_token_beg(ctx) && !params.single_segment) {
const auto t1 = seek + 2*(tokens_cur[i].tid - whisper_token_beg(ctx));

Expand All @@ -4535,7 +4548,7 @@ int whisper_full_with_state(

//printf("tt0 = %d, tt1 = %d, text = %s, token = %s, token_id = %d, tid = %d\n", tt0, tt1, text.c_str(), ctx->vocab.id_to_token[tokens_cur[i].id].c_str(), tokens_cur[i].id, tokens_cur[i].tid);

result_all.push_back({ tt0, tt1, text, {} });
result_all.push_back({ tt0, tt1, text, {} , speaker_turn_next });
for (int j = i0; j <= i; j++) {
result_all.back().tokens.push_back(tokens_cur[j]);
}
Expand All @@ -4561,6 +4574,7 @@ int whisper_full_with_state(
i--;
t0 = t1;
i0 = i + 1;
speaker_turn_next = false;
}
}

Expand All @@ -4579,7 +4593,7 @@ int whisper_full_with_state(
}
}

result_all.push_back({ tt0, tt1, text, {} });
result_all.push_back({ tt0, tt1, text, {} , speaker_turn_next });
for (int j = i0; j < (int) tokens_cur.size(); j++) {
result_all.back().tokens.push_back(tokens_cur[j]);
}
Expand Down Expand Up @@ -4759,6 +4773,10 @@ int64_t whisper_full_get_segment_t1(struct whisper_context * ctx, int i_segment)
return ctx->state->result_all[i_segment].t1;
}

// Returns true if a speaker-turn token was predicted at the end of segment
// i_segment (tinydiarize mono diarization). Reads the flag stored on the
// segment in the context's default state. NOTE(review): i_segment is not
// bounds-checked — caller must keep it within [0, whisper_full_n_segments).
bool whisper_full_get_segment_speaker_turn_next(struct whisper_context * ctx, int i_segment) {
return ctx->state->result_all[i_segment].speaker_turn_next;
}

// Returns the transcribed text of segment i_segment from an explicit state
// object (rather than the context's default state). The returned pointer
// aliases the segment's internal std::string and is only valid until the
// state's results are modified or freed. NOTE(review): i_segment is not
// bounds-checked — caller must keep it within range.
const char * whisper_full_get_segment_text_from_state(struct whisper_state * state, int i_segment) {
return state->result_all[i_segment].text.c_str();
}
Expand Down
7 changes: 5 additions & 2 deletions whisper.h
Expand Up @@ -284,8 +284,8 @@ extern "C" {
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id);

// Task tokens
WHISPER_API whisper_token whisper_token_translate (void);
WHISPER_API whisper_token whisper_token_transcribe(void);
WHISPER_API whisper_token whisper_token_translate (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_transcribe(struct whisper_context * ctx);

// Performance information from the default state.
WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
Expand Down Expand Up @@ -460,6 +460,9 @@ extern "C" {
WHISPER_API int64_t whisper_full_get_segment_t1 (struct whisper_context * ctx, int i_segment);
WHISPER_API int64_t whisper_full_get_segment_t1_from_state(struct whisper_state * state, int i_segment);

// Get whether the next segment is predicted as a speaker turn
WHISPER_API bool whisper_full_get_segment_speaker_turn_next(struct whisper_context * ctx, int i_segment);

// Get the text of the specified segment
WHISPER_API const char * whisper_full_get_segment_text (struct whisper_context * ctx, int i_segment);
WHISPER_API const char * whisper_full_get_segment_text_from_state(struct whisper_state * state, int i_segment);
Expand Down