Commit db566365 authored by Yoach Lacombe

clean artifacts

parent 64cfa64e
command:
- python3
- ${program}
- --fp16
- --fp16_full_eval
- --do_train
- --do_eval
- --trust_remote_code
- --overwrite_output_dir
- --ignore_mismatched_sizes
- --gradient_checkpointing
- ${args}
method: random
metric:
goal: maximize
name: eval/accuracy
parameters:
model_name_or_path:
value: facebook/mms-lid-126
train_dataset_name:
value: stable-speech/concatenated-normalized-accent-dataset
train_dataset_config_name:
value: default
train_split_name:
value: train
train_label_column_name:
value: labels
eval_dataset_name:
value: stable-speech/concatenated-normalized-accent-dataset
eval_dataset_config_name:
value: default
eval_split_name:
value: test
eval_label_column_name:
value: labels
output_dir:
value: ./
remove_unused_columns:
value: false
learning_rate:
value: 1e-4
lr_scheduler_type:
value: constant_with_warmup
max_length_seconds:
value: 20
min_length_seconds:
value: 5
attention_mask:
value: true
warmup_steps:
value: 50
max_steps:
value: 1000
per_device_train_batch_size:
value: 32
per_device_eval_batch_size:
value: 32
preprocessing_num_workers:
value: 4
dataloader_num_workers:
value: 4
logging_strategy:
value: steps
logging_steps:
value: 10
evaluation_strategy:
value: steps
eval_steps:
value: 1000
save_strategy:
value: steps
save_steps:
value: 1000
freeze_base_model:
values:
- false
- true
push_to_hub:
value: false
filter_threshold:
value: 1
feat_proj_dropout:
values:
- 0.0
- 0.1
- 0.2
attention_dropout:
values:
- 0.0
- 0.1
- 0.2
activation_dropout:
values:
- 0.0
- 0.1
- 0.2
hidden_dropout:
values:
- 0.0
- 0.1
- 0.2
final_dropout:
values:
- 0.0
- 0.1
- 0.2
mask_time_prob:
values:
- 0.0
- 0.1
- 0.2
mask_time_length:
values:
- 10
- 15
- 20
mask_feature_prob:
values:
- 0.0
- 0.1
- 0.2
mask_feature_length:
values:
- 10
- 15
- 20
program: run_audio_classification.py
project: mms-lid-accent-classification
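The file above appears to be a Weights & Biases sweep configuration for run_audio_classification.py. A minimal sketch of launching it with the wandb CLI, assuming the config is saved as sweep.yaml (the file name, entity and sweep id below are placeholders, not taken from the repository):
#!/usr/bin/env bash
# register the sweep defined in the YAML config above (assumed saved as sweep.yaml)
wandb sweep --project mms-lid-accent-classification sweep.yaml
# launch an agent using the sweep id printed by the previous command
wandb agent <entity>/mms-lid-accent-classification/<sweep_id>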
#!/usr/bin/env bash
CUDA_VISIBLE_DEVICES=2 python run_audio_classification_one_layer.py \
--model_name_or_path "facebook/mms-lid-4017" \
--train_dataset_name "stable-speech/concatenated-normalized-accent-dataset" \
--train_dataset_config_name "default" \
--train_split_name "train" \
--train_label_column_name "labels" \
--eval_dataset_name "stable-speech/concatenated-normalized-accent-dataset" \
--eval_dataset_config_name "default" \
--eval_split_name "test" \
--eval_label_column_name "labels" \
--output_dir "./tmp/" \
--do_train \
--do_eval \
--overwrite_output_dir \
--remove_unused_columns false \
--fp16 \
--fp16_full_eval \
--learning_rate 1e-4 \
--max_length_seconds 20 \
--min_length_seconds 5 \
--attention_mask \
--warmup_steps 100 \
--max_steps 2000 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--preprocessing_num_workers 4 \
--dataloader_num_workers 4 \
--logging_strategy "steps" \
--logging_steps 10 \
--evaluation_strategy "steps" \
--eval_steps 300 \
--save_strategy "no" \
--save_steps 2000 \
--freeze_base_model true \
--freeze_feature_encoder true \
--push_to_hub false \
--trust_remote_code \
--use_weighted_layer_sum true
#!/usr/bin/env bash
python run_audio_classification.py \
--model_name_or_path "facebook/mms-lid-126" \
--train_dataset_name "stable-speech/concatenated-normalized-accent-dataset+stable-speech/concatenated-common-voice-15-accented" \
--train_dataset_config_name "default+default" \
--train_split_name "train+train" \
--train_label_column_name "labels+labels" \
--eval_dataset_name "stable-speech/concatenated-normalized-accent-dataset" \
--eval_dataset_config_name "default" \
--eval_split_name "test" \
--eval_label_column_name "labels" \
--output_dir "./" \
--do_train \
--do_eval \
--overwrite_output_dir \
--remove_unused_columns False \
--fp16 \
--fp16_full_eval \
--learning_rate 1e-4 \
--lr_scheduler_type "constant_with_warmup" \
--max_length_seconds 20 \
--min_length_seconds 5 \
--attention_mask \
--warmup_steps 100 \
--max_steps 5000 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--preprocessing_num_workers 4 \
--dataloader_num_workers 4 \
--logging_strategy "steps" \
--logging_steps 10 \
--evaluation_strategy "steps" \
--eval_steps 1000 \
--save_strategy "no" \
--save_steps 5000 \
--filter_threshold 0.01 \
--freeze_base_model False \
--gradient_checkpointing \
--push_to_hub False \
--trust_remote_code
command:
- python3
- ${program}
- --fp16
- --fp16_full_eval
- --do_train
- --do_eval
- --trust_remote_code
- --overwrite_output_dir
- ${args}
method: random
metric:
goal: maximize
name: eval/accuracy
parameters:
model_name_or_path:
value: facebook/mms-lid-4017
train_dataset_name:
value: "stable-speech/concatenated-normalized-accent-dataset+stable-speech/concatenated-common-voice-15-accented"
train_dataset_config_name:
value: "default+default"
train_split_name:
value: "train+train"
train_label_column_name:
value: "labels+labels"
eval_dataset_name:
value: stable-speech/concatenated-normalized-accent-dataset
eval_dataset_config_name:
value: default
eval_split_name:
value: test
eval_label_column_name:
value: labels
output_dir:
value: "/raid/yoach/tmp/"
remove_unused_columns:
value: false
learning_rate:
distribution: log_uniform_values
min: 3e-6
max: 0.01
lr_scheduler_type:
value: constant
max_length_seconds:
value: 20 # give some data diversity for longer audio samples
min_length_seconds:
value: 5
attention_mask:
values:
- true
num_train_epochs:
values:
- 2
- 5
- 10
- 20
- 40
- 60
per_device_train_batch_size:
value: 32
per_device_eval_batch_size:
value: 32
preprocessing_num_workers:
value: 8
dataloader_num_workers:
value: 8
logging_strategy:
value: steps
logging_steps:
value: 10
evaluation_strategy:
value: steps
eval_steps:
value: 2000
save_strategy:
value: "no"
save_steps:
value: 2000
metric_for_best_model:
value: accuracy
push_to_hub:
value: false
use_weighted_layer_sum:
value: false
freeze_base_model:
value: true
max_samples_per_label:
value: 10000
save_to_disk:
value: "/raid/yoach/tmp_dataset_accents/"
temporary_save_to_disk:
value: "/raid/yoach/tmp_hidden_states/"
use_last_embedding_layer:
value: true
filter_threshold:
value: "0.001"
program: run_audio_classification.py
project: mms-lid-accent-classification-v2
#!/usr/bin/env bash
python run_audio_classification.py \
--model_name_or_path "hf-internal-testing/tiny-random-wav2vec2" \
--train_dataset_name "facebook/voxpopuli" \
--train_dataset_config_name "en_accented" \
--train_split_name "test" \
--train_label_column_name "accent" \
--eval_dataset_name "facebook/voxpopuli" \
--eval_dataset_config_name "en_accented" \
--eval_split_name "test" \
--eval_label_column_name "accent" \
--trust_remote_code \
--output_dir "./" \
--do_train \
--do_eval \
--max_train_samples 100 \
--max_eval_samples 100 \
--overwrite_output_dir \
--remove_unused_columns False \
--fp16 \
--learning_rate 1e-4 \
--min_length_seconds 5 \
--max_length_seconds 10 \
--attention_mask False \
--warmup_ratio 0.1 \
--num_train_epochs 5 \
--per_device_train_batch_size 4 \
--per_device_eval_batch_size 4 \
--dataloader_num_workers 0 \
--logging_strategy "steps" \
--logging_steps 10 \
--evaluation_strategy "epoch" \
--save_strategy "epoch" \
--load_best_model_at_end True \
--metric_for_best_model "accuracy" \
--save_total_limit 3 \
--seed 0
#!/usr/bin/env bash
python run_dataset_concatenation.py \
--dataset_name "sanchit-gandhi/vctk+facebook/voxpopuli+sanchit-gandhi/edacc-normalized" \
--dataset_config_name "default+en_accented+default" \
--dataset_split_name "train+test+validation" \
--label_column_name "accent+accent+accent" \
--text_column_name "text+normalized_text+text" \
--speaker_column_name "speaker_id+speaker_id+speaker" \
--batch_size 500 \
--output_dir "./concatenated-dataset"
python run_dataset_concatenation.py \
--dataset_name "sanchit-gandhi/edacc-normalized" \
--dataset_config_name "default" \
--dataset_split_name "test" \
--label_column_name "accent" \
--text_column_name "text" \
--speaker_column_name "speaker" \
--batch_size 500 \
--output_dir "./concatenated-dataset-test"
#!/usr/bin/env bash
python run_dataset_concatenation.py \
--dataset_name "stable-speech/common_voice_15_0_accented" \
--dataset_config_name "en" \
--dataset_split_name "train" \
--label_column_name "accent" \
--text_column_name "sentence" \
--speaker_column_name "client_id" \
--batch_size 250 \
--preprocessing_num_workers 4 \
--output_dir "./concatenated-dataset-cv"
python run_dataset_concatenation.py \
--dataset_name "stable-speech/common_voice_15_0_accented" \
--dataset_config_name "en" \
--dataset_split_name "test" \
--label_column_name "accent" \
--text_column_name "sentence" \
--speaker_column_name "client_id" \
--batch_size 250 \
--preprocessing_num_workers 4 \
--output_dir "./concatenated-dataset-cv-test"
import csv
import os
import re
import shutil
import sys
from dataclasses import dataclass, field
import soundfile as sf
from datasets import Audio, Dataset, DatasetDict, load_dataset
from tqdm import tqdm
from transformers import HfArgumentParser
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to the data we are going to use for dataset preparation.
"""
dataset_dir: str = field(
default=None,
metadata={
"help": "Path where the EdAcc tar.gz archive is extracted. Leave in it's raw format: the script will "
"assume it's unchanged from the download and use relative paths to load the relevant audio files."
},
)
output_dir: str = field(
default=None,
metadata={
"help": "Where to save the processed dataset to disk. If unspecified, uses a 'pretty' version of the "
"original dataset name. E.g. 'facebook/voxpopuli' will be saved under 'voxpopuli'."
},
)
overwrite_output_dir: bool = field(
default=True,
metadata={"help": "Overwrite the content of the output directory."},
)
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether or not to push the processed dataset to the Hub."},
)
hub_dataset_id: str = field(
default=None,
metadata={"help": "Repository namespace if pushing to the Hugging Face Hub."},
)
private_repo: bool = field(
default=True,
metadata={"help": "Whether or not to push the processed dataset to a private repository on the Hub"},
)
max_samples: int = field(
default=None,
metadata={"help": "Maximum number of samples per split. Useful for debugging purposes."},
)
def main():
# 1. Parse input arguments
parser = HfArgumentParser(DataTrainingArguments)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
else:
data_args = parser.parse_args_into_dataclasses()[0]
# 1. Load accents for each speaker
linguistic_background = {}
linguistic_background_csv = os.path.join(data_args.dataset_dir, "linguistic_background.csv")
with open(linguistic_background_csv, encoding="utf-8") as file:
reader = csv.DictReader(file, delimiter=",")
for line in reader:
linguistic_background[line["PARTICIPANT_ID"]] = line[
"How would you describe your accent in English? (e.g. Italian, Glaswegian)"
]
accent_dataset = load_dataset("sanchit-gandhi/edacc_accents", split="train")
def format_dataset(batch):
batch["speaker_id"] = (
batch["Final-Participant_ID"].replace("EAEC", "EDACC").replace("P1", "-A").replace("P2", "-B")
)
return batch
accent_dataset = accent_dataset.map(format_dataset, remove_columns=["Final-Participant_ID"])
# 2. Clean accents for each speaker
linguistic_background_clean = {
participant: accent.strip()
for participant, accent in zip(accent_dataset["speaker_id"], accent_dataset["English_Variety"])
}
linguistic_variety = {
participant: l1.strip() for participant, l1 in zip(accent_dataset["speaker_id"], accent_dataset["L1_Variety"])
}
# 3. Initialize dataset dict
raw_datasets = DatasetDict()
if data_args.overwrite_output_dir and os.path.exists(data_args.output_dir) and os.path.isdir(data_args.output_dir):
shutil.rmtree(data_args.output_dir)
output_dir_processed = os.path.join(data_args.output_dir, "processed")
# 4. Iterate over dev/test files
for split, split_formatted in zip(["dev", "test"], ["validation", "test"]):
data_dir = os.path.join(data_args.dataset_dir, split)
metadata = os.path.join(data_dir, "stm")
output_dir_split = os.path.join(output_dir_processed, split)
os.makedirs(output_dir_split, exist_ok=True)
all_speakers = []
all_genders = []
all_l1s = []
all_texts = []
all_audio_paths = []
all_normalized_accents = []
all_raw_accents = []
current_audio = None
current_audio_array = None
current_sampling_rate = None
current_counter = 1
gender_pat = r".*?\<(.*),.*"
l1_pat = r".*?\,(.*)>.*"
with open(metadata, "r") as file:
for idx, line in tqdm(enumerate(file), desc=split):
# example line is: 'EDACC-C06 1 EDACC-C06-A 0.00 5.27 <male,l1> C ELEVEN DASH P ONE\n'
# the transcription always comes to the right of the last right angle bracket
text_idx = line.find(">") + 1
all_texts.append(line[text_idx + 1 : -1])
# the metadata immediately precedes this
line = line[:text_idx]
file, channel, speaker, start, end, gender_l1 = line.split(" ")
# add speaker information to cumulative lists
all_raw_accents.append(linguistic_background[speaker])
all_normalized_accents.append(linguistic_background_clean[speaker])
all_speakers.append(speaker)
# add gender/l1 information
all_genders.append(re.search(gender_pat, gender_l1).group(1))
all_l1s.append(linguistic_variety[speaker])
# read audio file if different from previous
if file != current_audio:
current_audio_array, current_sampling_rate = sf.read(
os.path.join(data_args.dataset_dir, "data", file + ".wav")
)
current_audio = file
current_counter = 1
else:
current_counter += 1
# chunk audio file according to start/end times
start = int(float(start) * current_sampling_rate)
end = int(float(end) * current_sampling_rate)
end = min(end, len(current_audio_array))
chunked_audio = current_audio_array[start:end]
save_path = os.path.join(output_dir_split, f"{file}-{current_counter}.wav")
sf.write(save_path, chunked_audio, current_sampling_rate)
all_audio_paths.append(save_path)
if data_args.max_samples is not None and (data_args.max_samples - 1) == idx:
break
raw_datasets[split_formatted] = Dataset.from_dict(
{
"speaker": all_speakers,
"text": all_texts,
"accent": all_normalized_accents,
"raw_accent": all_raw_accents,
"gender": all_genders,
"l1": all_l1s,
"audio": all_audio_paths,
}
).cast_column("audio", Audio())
if data_args.push_to_hub:
raw_datasets.push_to_hub(data_args.hub_dataset_id, token=True)
raw_datasets.save_to_disk(data_args.output_dir)
if __name__ == "__main__":
main()
#!/usr/bin/env bash
python prepare_edacc.py \
--dataset_dir "/fsx/sanchit/edacc/edacc_v1.0" \
--output_dir "/fsx/sanchit/edacc_processed" \
--hub_dataset_id "sanchit-gandhi/edacc-normalized" \
--push_to_hub
import os
import sys
from dataclasses import dataclass, field
from pathlib import Path
import numpy as np
from datasets import Audio, concatenate_datasets, load_dataset
from huggingface_hub import get_full_repo_name
from transformers import HfArgumentParser, WhisperTokenizerFast
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(
default=None,
metadata={"help": "The name of the dataset to use (via the datasets library)."},
)
dataset_config_name: str = field(
default=None,
metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
)
dataset_split_name: str = field(
default=None,
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
label_column_name: str = field(
default="labels",
metadata={"help": "The name of the dataset column containing the labels in the dataset. Defaults to 'label'"},
)
text_column_name: str = field(
default="text",
metadata={
"help": "The name of the dataset column containing the text transcriptions in the dataset. Defaults to 'text'"
},
)
speaker_column_name: str = field(
default="speaker_id",
metadata={
"help": "The name of the dataset column containing the speaker ids in the dataset. Defaults to 'speaker_id'"
},
)
dataset_cache_dir: str = field(
default=None,
metadata={"help": "Path to cache directory for saving and loading datasets"},
)
preprocessing_num_workers: int = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
batch_size: int = field(
default=500,
metadata={"help": "Number of examples per batch provided to the preprocessing function."},
)
download_only: bool = field(
default=False,
metadata={"help": "Whether to only do data download and skip pre-processing."},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"},
)
sampling_rate: int = field(
default=16_000,
metadata={
"help": "Sampling rate at which to resample the audio data. Should be set to the same sampling rate as the target model."
},
)
max_samples: int = field(
default=None,
metadata={
"help": "For debugging purposes, truncate the number of examples in the dataset to this value if set."
},
)
output_dir: str = field(
default=None,
metadata={
"help": "Where to save the processed dataset to disk. If unspecified, uses a 'pretty' version of the "
"original dataset name. E.g. 'facebook/voxpopuli' will be saved under 'voxpopuli'."
},
)
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether or not to push the processed dataset to the Hub."},
)
seed: int = field(
default=0,
metadata={"help": "RNG seed for reproducibility. Used during the final shuffling of the combined dataset."},
)
def convert_dataset_str_to_list(
dataset_names,
dataset_config_names,
splits=None,
label_column_names=None,
text_column_names=None,
speaker_column_names=None,
dataset_samples=None,
default_split="train",
):
if isinstance(dataset_names, str):
dataset_names = dataset_names.split("+")
dataset_config_names = dataset_config_names.split("+")
splits = splits.split("+") if splits is not None else None
label_column_names = label_column_names.split("+") if label_column_names is not None else None
text_column_names = text_column_names.split("+") if text_column_names is not None else None
speaker_column_names = speaker_column_names.split("+") if speaker_column_names is not None else None
dataset_samples = dataset_samples.split("+") if dataset_samples is not None else None
# basic checks to ensure we've got the right number of datasets/configs/splits/columns/samples
if len(dataset_names) != len(dataset_config_names):
raise ValueError(
f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(dataset_config_names)} configs."
)
if splits is not None and len(splits) != len(dataset_names):
raise ValueError(
f"Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits."
)
if label_column_names is not None and len(label_column_names) != len(dataset_names):
raise ValueError(
f"Ensure one label column name is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(label_column_names)} label column names."
)
if text_column_names is not None and len(text_column_names) != len(dataset_names):
raise ValueError(
f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(text_column_names)} text column names."
)
if speaker_column_names is not None and len(speaker_column_names) != len(dataset_names):
raise ValueError(
f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(speaker_column_names)} speaker column names."
)
if dataset_samples is not None:
if len(dataset_samples) != len(dataset_names):
raise ValueError(
f"Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and "
f"{len(dataset_samples)} samples."
)
dataset_samples = [float(ds_sample) for ds_sample in dataset_samples]
else:
dataset_samples = [None] * len(dataset_names)
label_column_names = (
label_column_names if label_column_names is not None else ["labels" for _ in range(len(dataset_names))]
)
text_column_names = (
text_column_names if text_column_names is not None else ["text" for _ in range(len(dataset_names))]
)
speaker_column_names = (
speaker_column_names if speaker_column_names is not None else ["speaker_id" for _ in range(len(dataset_names))]
)
splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))]
dataset_names_dict = []
for i, ds_name in enumerate(dataset_names):
dataset_names_dict.append(
{
"name": ds_name,
"config": dataset_config_names[i],
"split": splits[i],
"label_column_name": label_column_names[i],
"text_column_name": text_column_names[i],
"speaker_column_name": speaker_column_names[i],
"samples": dataset_samples[i],
}
)
return dataset_names_dict
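# Illustrative example (not part of the original script, shown for clarity): a call such as
#     convert_dataset_str_to_list(
#         "sanchit-gandhi/vctk+facebook/voxpopuli",
#         "default+en_accented",
#         splits="train+test",
#         label_column_names="accent+accent",
#     )
# returns one dict per dataset; the first entry would be
#     {"name": "sanchit-gandhi/vctk", "config": "default", "split": "train",
#      "label_column_name": "accent", "text_column_name": "text",
#      "speaker_column_name": "speaker_id", "samples": None}
# with the text and speaker columns falling back to their defaults ("text", "speaker_id")
# and samples to None when those arguments are not supplied.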
def main():
# 1. Parse input arguments
parser = HfArgumentParser(DataTrainingArguments)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0]
else:
data_args = parser.parse_args_into_dataclasses()[0]
dataset_names_dict = convert_dataset_str_to_list(
data_args.dataset_name,
data_args.dataset_config_name,
splits=data_args.dataset_split_name,
label_column_names=data_args.label_column_name,
text_column_names=data_args.text_column_name,
speaker_column_names=data_args.speaker_column_name,
)
# load whisper tokenizer for normalisation
sampling_rate = data_args.sampling_rate
tokenizer = WhisperTokenizerFast.from_pretrained("openai/whisper-tiny.en")
max_input_length = int(data_args.max_duration_in_seconds * sampling_rate)
batch_size = data_args.batch_size
preprocessing_num_workers = data_args.preprocessing_num_workers
all_vectorized_datasets = []
for dataset_dict in dataset_names_dict:
print(10 * "=", dataset_dict["name"], 10 * "=")
raw_datasets = load_dataset(
dataset_dict["name"],
dataset_dict["config"],
split=dataset_dict["split"],
cache_dir=data_args.dataset_cache_dir,
num_proc=data_args.preprocessing_num_workers,
)
if data_args.download_only:
continue
features = raw_datasets.column_names
if dataset_dict["label_column_name"] not in features:
raise ValueError(
f"--label_column_name {dataset_dict['label_column_name']} not found in dataset '{dataset_dict['name']}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(features)}."
)
elif dataset_dict["label_column_name"] != "labels":
raw_datasets = raw_datasets.rename_column(dataset_dict["label_column_name"], "labels")
if dataset_dict["text_column_name"] not in features:
raise ValueError(
f"--text_column_name {dataset_dict['text_column_name']} not found in dataset '{dataset_dict['name']}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(features)}."
)
elif dataset_dict["text_column_name"] != "text":
raw_datasets = raw_datasets.rename_column(dataset_dict["text_column_name"], "text")
if dataset_dict["speaker_column_name"] not in features:
raise ValueError(
f"--speaker_column_name {dataset_dict['speaker_column_name']} not found in dataset '{dataset_dict['name']}'. "
"Make sure to set `--speaker_column_name` to the correct speaker id column - one of "
f"{', '.join(features)}."
)
elif dataset_dict["speaker_column_name"] != "speaker_id":
raw_datasets = raw_datasets.rename_column(dataset_dict["speaker_column_name"], "speaker_id")
raw_datasets = raw_datasets.remove_columns(
set(raw_datasets.features.keys()) - {"audio", "labels", "text", "speaker_id"}
)
if data_args.max_samples is not None:
raw_datasets = raw_datasets.select(range(data_args.max_samples))
raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, Audio(sampling_rate=sampling_rate))
raw_datasets = raw_datasets.sort("speaker_id")
def filter_transcriptions(text):
normalized_text = tokenizer.normalize(text).strip()
return bool(normalized_text) and text.lower() != "ignore_time_segment_in_scoring"
raw_datasets = raw_datasets.filter(
filter_transcriptions, input_columns=["text"], desc="Filtering non-speech transcriptions"
)
def prepare_dataset(batch):
audio = [sample["array"] for sample in batch["audio"]]
input_lengths = [len(sample) for sample in audio]
concatenated_audio = []
concatenated_text = []
concatenated_speaker = []
concatenated_labels = []
audio_sample = audio[0]
text_sample = batch["text"][0]
label_sample = batch["labels"][0]
for idx in range(1, len(audio)):
prev_speaker = batch["speaker_id"][idx - 1]
speaker = batch["speaker_id"][idx]
if len(audio_sample) + input_lengths[idx] < max_input_length:
if speaker == prev_speaker:
# we have no information about whether the segments follow on sequentially
# so we just ensure the same speaker as we concatenate across files
audio_sample = np.append(audio_sample, audio[idx])
# extra spaces in the text transcription don't matter, since we only use it for the WER computation
text_sample += " " + batch["text"][idx]
else:
# segments do not follow sequentially, save the audio and start looping again
concatenated_audio.append(audio_sample)
concatenated_text.append(text_sample)
concatenated_labels.append(label_sample)
concatenated_speaker.append(speaker)
audio_sample = audio[idx]
text_sample = batch["text"][idx]
label_sample = batch["labels"][idx]
else:
# concatenated audio exceeds max length, save the audio and start looping again
concatenated_audio.append(audio_sample)
concatenated_text.append(text_sample)
concatenated_labels.append(label_sample)
concatenated_speaker.append(speaker)
audio_sample = audio[idx]
text_sample = batch["text"][idx]
label_sample = batch["labels"][idx]
batch["audio"] = [{"array": array, "sampling_rate": sampling_rate} for array in concatenated_audio]
batch["text"] = concatenated_text
batch["labels"] = concatenated_labels
batch["speaker_id"] = concatenated_speaker
return batch
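# Note: with the default max_duration_in_seconds of 20.0 and a 16 kHz sampling rate,
# max_input_length is 320_000 samples, so each concatenated example stays under ~20 s of audio.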
raw_datasets = raw_datasets.map(
prepare_dataset,
batched=True,
batch_size=batch_size,
num_proc=preprocessing_num_workers,
desc="Concatenating dataset...",
)
pretty_name = dataset_dict["name"].split("/")[-1]
def postprocess_ids(speaker_id, idx):
formatted_idx = f"{pretty_name}-{speaker_id}-{idx}"
return {"id": formatted_idx}
raw_datasets = raw_datasets.map(
postprocess_ids,
input_columns=["speaker_id"],
with_indices=True,
desc="Setting sample idxs...",
num_proc=preprocessing_num_workers,
)
print(f"Final length {pretty_name}: ", len(raw_datasets))
# Re-format the dataset as numpy arrays before concatenation
raw_datasets = raw_datasets.with_format("np")
all_vectorized_datasets.append(raw_datasets)
all_vectorized_datasets = concatenate_datasets(all_vectorized_datasets)
dataset_features = all_vectorized_datasets.features.copy()
dataset_features["audio"] = Audio(sampling_rate=sampling_rate)
all_vectorized_datasets = all_vectorized_datasets.cast(
dataset_features, batch_size=batch_size, writer_batch_size=batch_size, num_proc=preprocessing_num_workers
)
all_vectorized_datasets = all_vectorized_datasets.shuffle(seed=data_args.seed)
all_vectorized_datasets.save_to_disk(data_args.output_dir)
repo_name = get_full_repo_name(Path(data_args.output_dir).absolute().name)
if data_args.push_to_hub:
all_vectorized_datasets.push_to_hub(repo_name, config_name="train", max_shard_size="1GB")
if __name__ == "__main__":
main()
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import torch
from accelerate import Accelerator
from accelerate.logging import get_logger
from datasets import DatasetDict, load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
HfArgumentParser,
)
logger = get_logger(__name__, log_level="INFO")
@dataclass
class ModelArguments:
"""
Arguments pertaining to the model we are going to use for the prompt annotation.
"""
model_name_or_path: str = field(
metadata={"help": "The name of the model to use (via the transformers library) for the prompt annotation."},
)
per_device_eval_batch_size: int = field(
metadata={"help": "The per-device batch size to use for inference."},
)
model_variant: str = field(
default=None,
metadata={"help": "If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. "},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
torch_dtype: Optional[str] = field(
default="float16",
metadata={
"help": (
"Floating-point format in which the model weights should be initialized"
" and the computations run. Choose one of `[float32, float16, bfloat16]`."
)
},
)
attn_implementation: Optional[str] = field(
default="sdpa",
metadata={"help": "Which attn type to use: ['eager', 'sdpa', 'flash_attention_2']"},
)
load_in_8bit: Optional[bool] = field(
default=False, metadata={"help": "Whether to use 8-bit precision for inference."}
)
load_in_4bit: Optional[bool] = field(
default=False, metadata={"help": "Whether to use 4-bit precision for inference."}
)
bnb_4bit_quant_type: Optional[str] = field(
default="nf4", metadata={"help": "precise the quantization type (fp4 or nf4)"}
)
use_bnb_nested_quant: Optional[bool] = field(default=False, metadata={"help": "Whether to use nested quantization."})
trust_remote_code: Optional[bool] = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
"should only be set to `True` for repositories you trust and in which you have read the code, as it will "
"execute code present on the Hub on your local machine."
)
},
)
use_fast_tokenizer: Optional[bool] = field(
default=True, metadata={"help": "Use fast tokenizer for encoding/decoding input ids"}
)
token: Optional[bool] = field(
default=True,
metadata={
"help": "Whether or not to use an authentication token when loading/uploading from the Hugging Face Hub"
},
)
do_sample: Optional[bool] = field(default=True, metadata={"help": "Whether to use sampling mode for generation"})
temperature: Optional[float] = field(default=0.6, metadata={"help": "Temperature for sampling-based generation"})
max_new_tokens: Optional[int] = field(
default=256, metadata={"help": "Maximum number of new tokens during generation"}
)
compile_generate: Optional[bool] = field(
default=False, metadata={"help": "Whether to compile the forward pass (not sampling) in generate."}
)
@dataclass
class DataArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
output_dir: str = field(
metadata={
"help": "Where to save the processed dataset to disk. If unspecified, uses a 'pretty' version of the "
"original dataset name. E.g. 'facebook/voxpopuli' will be saved under 'voxpopuli'."
},
)
dataset_name: str = field(
default=None,
metadata={"help": "The name of the dataset to use (via the datasets library)"},
)
dataset_config_name: Optional[str] = field(
default=None,
metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
)
dataset_split_name: Optional[str] = field(
default=None,
metadata={"help": "The split name of the dataset to use (via the datasets library)."},
)
dataset_cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to cache directory for saving and loading datasets"},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={"help": "Maximum number of samples for generation - use for debugging purposes."},
)
overwrite_cache: bool = field(
default=False,
metadata={"help": "Overwrite the cached training and evaluation sets"},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
dataloader_num_workers: Optional[int] = field(
default=0,
metadata={"help": "The number of processes to use for the dataloader."},
)
push_to_hub: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to push the processed dataset to the Hub."},
)
hub_dataset_id: Optional[str] = field(
default=None,
metadata={"help": "Repository namespace if pushing to the Hugging Face Hub."},
)
overwrite_output_dir: Optional[bool] = field(
default=False,
metadata={"help": "Overwrite the content of the output directory each time the script is run."},
)
def __post_init__(self):
if self.push_to_hub and self.hub_dataset_id is None:
raise ValueError("You must specify the `hub_dataset_id` when setting `--push_to_hub=True`")
def get_quantization_config(model_args: ModelArguments) -> Union[BitsAndBytesConfig, None]:
if model_args.load_in_4bit:
compute_dtype = torch.float16
if model_args.torch_dtype not in {"auto", None}:
compute_dtype = getattr(torch, model_args.torch_dtype)
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
)
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
else:
quantization_config = None
return quantization_config
def get_current_device() -> int:
"""Get the current device. For GPU we return the local process index to enable multiple GPU training."""
return Accelerator().local_process_index if torch.cuda.is_available() else "cpu"
def get_kbit_device_map() -> Union[Dict[str, int], None]:
"""Useful for running inference with quantized models by setting `device_map=get_peft_device_map()`"""
return {"": get_current_device()} if torch.cuda.is_available() else None
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received to the longest sequence in the batch.
"""
tokenizer: Any
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_ids = {"input_ids": [feature["input_ids"] for feature in features]}
batch = self.tokenizer.pad(input_ids, return_tensors="pt", padding="longest", return_attention_mask=True)
return batch
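# Note: the collator pads the tokenized prompts to the longest sequence in the batch and
# returns the attention mask, so that generation can ignore the padded positions.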
# TODO(SG): add accent keyword
PROMPT = """You will be given six descriptive keywords related to an audio sample of a person's speech. These keywords include:
1. The gender (e.g., male, female)
2. The level of reverberation (e.g., very roomy sounding, quite roomy sounding, slightly roomy sounding, moderate reverberation, slightly confined sounding, quite confined sounding, very confined sounding)
3. The amount of noise in the sample (e.g., very noisy, quite noisy, slightly noisy, moderate ambient sound, slightly clear, quite clear, very clear)
4. The tone of the speaker's voice (e.g., very monotone, quite monotone, slightly monotone, moderate intonation, slightly expressive, quite expressive, very expressive)
5. The pace of the speaker's delivery (e.g., very slowly, quite slowly, slightly slowly, moderate speed, slightly fast, quite fast, very fast)
6. The pitch of the speaker's voice (e.g., very low pitch, quite low pitch, slightly low pitch, moderate pitch, slightly high pitch, quite high pitch, very high pitch)
Your task is to create a text description using these keywords that accurately describes the speech sample while ensuring the description remains grammatically correct and easy to understand. You should rearrange the keyword order as necessary, and substitute synonymous terms where appropriate. If the amount of noise is 'very noisy' and the level of reverberation is 'very roomy sounding', include terms like 'very bad recording' in the description. Likewise, if the amount of noise is 'very clear' and the level of reverberation is 'very confined sounding', include terms like 'very good recording' in the description. Otherwise, do not add extra details beyond what has been provided, and only return the generated description.
For example, given the following keywords: 'female', 'slightly roomy sounding', 'slightly noisy', 'very expressive', 'slightly low pitch', 'very slowly', a valid description would be: 'a woman with a deep voice speaks slowly but has an animated delivery in an echoey room with some background noise'.
For the keywords: '[gender]', '[reverberation]', '[noise]', '[speech_monotony]', '[pitch]', '[speaking_rate]', the corresponding description is:"
"""
def main():
# 1. Parse input arguments
parser = HfArgumentParser((ModelArguments, DataArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args = parser.parse_args_into_dataclasses()
# 2. Setup logging
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
accelerator = Accelerator()
if data_args.overwrite_output_dir and os.path.exists(data_args.output_dir) and os.path.isdir(data_args.output_dir):
logger.info("Cleaning output dir from previous run...")
shutil.rmtree(data_args.output_dir)
# 3. Load annotated dataset
logger.info("*** Load annotated dataset ***")
if data_args.dataset_split_name is not None:
raw_datasets = DatasetDict()
data_splits = data_args.dataset_split_name.split("+")
# load on a split-wise basis
for split in data_splits:
raw_datasets[split] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=split,
cache_dir=model_args.cache_dir,
token=model_args.token,
num_proc=data_args.preprocessing_num_workers,
)
else:
# load all splits for annotation
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
num_proc=data_args.preprocessing_num_workers,
)
raw_datasets_features = set(raw_datasets[next(iter(raw_datasets))].features.keys())
if data_args.max_eval_samples is not None:
for split in raw_datasets:
raw_datasets[split] = raw_datasets[split].select(range(data_args.max_eval_samples))
# TODO(SG): add accent
EXPECTED_COLUMNS = {"gender", "pitch", "noise", "reverberation", "speech_monotony", "speaking_rate"}
if not EXPECTED_COLUMNS.issubset(raw_datasets_features):
missing_columns = EXPECTED_COLUMNS - raw_datasets_features
raise ValueError(
f"Missing columns {missing_columns} from the dataset features. Got dataset features {raw_datasets_features}"
)
# 4. Load pre-trained model
logger.info("*** Load pretrained model ***")
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
revision=model_args.model_revision,
variant=model_args.model_variant,
trust_remote_code=model_args.trust_remote_code,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
low_cpu_mem_usage=True,
token=model_args.token,
).eval()
if model_args.compile_generate:
if not callable(getattr(model, "_setup_cache", None)):
raise ValueError(
f"Static k/v cache is not compatible with the model {model.__class__.__name__}. Set `--compile_generate=False"
"for dynamic k/v cache"
)
model.generation_config.cache_implementation = "static"
model._forward = model.forward
compiled_forward = torch.compile(model.forward)
def compiled(func, input_ids, **kwargs):
return func(input_ids, **kwargs)
def call(input_ids, **kwargs):
if input_ids.shape[-1] == 1:
return compiled(compiled_forward, input_ids, **kwargs)
return model._forward(input_ids, **kwargs)
model.forward = call
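# Left padding is used below because, for decoder-only language models, batched generation
# requires prompts to be left-padded so that newly generated tokens directly continue each prompt.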
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
revision=model_args.model_revision,
trust_remote_code=model_args.trust_remote_code,
use_fast=model_args.use_fast_tokenizer,
padding_side="left",
)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.bos_token_id
model.generation_config.pad_token_id = model.generation_config.eos_token_id
def prepare_dataset(sample):
sample_prompt = PROMPT
for key in EXPECTED_COLUMNS:
sample_prompt = sample_prompt.replace(f"[{key}]", sample[key])
sample_prompt = [{"role": "user", "content": sample_prompt}]
token_ids = tokenizer.apply_chat_template(sample_prompt)
sample["input_ids"] = token_ids
return sample
with accelerator.main_process_first():
vectorized_datasets = raw_datasets.map(
prepare_dataset, num_proc=data_args.preprocessing_num_workers, desc="Preparing prompts"
)
# Prepare everything with our `accelerator`
model = accelerator.prepare(model)
data_collator = DataCollatorWithPadding(tokenizer)
def generate_step(batch):
output_ids = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
do_sample=model_args.do_sample,
temperature=model_args.temperature,
max_new_tokens=model_args.max_new_tokens,
)
output_ids = accelerator.pad_across_processes(output_ids, dim=1, pad_index=tokenizer.pad_token_id)
return output_ids
def postprocess_dataset(sample):
prompt_text = tokenizer.decode(sample["input_ids"], skip_special_tokens=True)
generated_text = tokenizer.decode(sample["generated_ids"], skip_special_tokens=True)
sample["text_description"] = generated_text[len(prompt_text) :]
return sample
for split in vectorized_datasets:
data_loader = DataLoader(
vectorized_datasets[split],
batch_size=model_args.per_device_eval_batch_size,
collate_fn=data_collator,
num_workers=data_args.dataloader_num_workers,
pin_memory=True,
)
data_loader = accelerator.prepare(data_loader)
all_generated_ids = []
for batch in tqdm(data_loader, disable=not accelerator.is_local_main_process):
generated_ids = generate_step(batch)
generated_ids = accelerator.gather_for_metrics(generated_ids)
all_generated_ids.extend(generated_ids.cpu().numpy())
vectorized_datasets[split] = vectorized_datasets[split].add_column("generated_ids", all_generated_ids)
if accelerator.is_main_process:
vectorized_datasets[split] = vectorized_datasets[split].map(
postprocess_dataset,
num_proc=data_args.preprocessing_num_workers,
desc="Postprocessing dataset",
remove_columns=["input_ids", "generated_ids"],
)
if accelerator.is_main_process:
vectorized_datasets.save_to_disk(data_args.output_dir)
if data_args.push_to_hub:
vectorized_datasets.push_to_hub(
data_args.hub_dataset_id,
config_name=data_args.dataset_config_name if data_args.dataset_config_name is not None else "default",
token=model_args.token,
)
accelerator.end_training()
if __name__ == "__main__":
main()