{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "c12f1025-2445-43f0-9c76-c8f4bff2502a", "metadata": {}, "outputs": [], "source": [ "%%capture\n", "!pip install datasets\n", "!pip install --upgrade transformers\n", "!pip install torchaudio\n", "!pip install huggingface_hub\n", "!pip install jiwer evaluate wandb\n", "!pip install librosa\n", "!pip install accelerate -U" ] }, { "cell_type": "code", "execution_count": null, "id": "3d55e844-cb32-4b6b-a073-5a0f5c393242", "metadata": { "scrolled": true }, "outputs": [], "source": [ "!pip install transformers[torch]\n", "!pip install accelerate==0.26.0" ] }, { "cell_type": "code", "execution_count": 1, "id": "0a8323f5-3d10-4ced-8cee-39497ad1ab16", "metadata": {}, "outputs": [], "source": [ "import os\n", "import re\n", "import unicodedata\n", "import json\n", "import numpy as np\n", "import pandas as pd\n", "import wandb\n", "import librosa\n", "\n", "from datasets import load_dataset, DatasetDict, Audio\n", "from transformers import (\n", " Wav2Vec2CTCTokenizer,\n", " Wav2Vec2FeatureExtractor,\n", " Wav2Vec2Processor,\n", " Wav2Vec2ForCTC,\n", " TrainingArguments,\n", " Trainer,\n", " EarlyStoppingCallback,\n", ")\n", "import evaluate\n", "import torch\n", "from dataclasses import dataclass\n", "from typing import Dict, List, Union\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "c5410b60-2f77-4d9e-a5c8-df57c6abd31b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cpu count: 96\n", "Num proc: 48\n", "Num dataloaders: 48\n" ] } ], "source": [ "num_proc = os.cpu_count()//2\n", "num_dataloaders = os.cpu_count()//2\n", "\n", "print(f\"Cpu count: {os.cpu_count()}\\nNum proc: {num_proc}\\nNum dataloaders: {num_dataloaders}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "b3770500-8e3f-4e75-9cf0-31af2e942cdb", "metadata": {}, "outputs": [], "source": [ "from huggingface_hub import notebook_login\n", "\n", "notebook_login()" ] }, { "cell_type": "code", "execution_count": 3, "id": "ccdeb15e-b579-437e-ae43-17941888a04e", "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset, Audio\n", "\n", "data_files = {\n", " 'train': 'train_data.csv',\n", " 'validation': 'validation_data.csv',\n", " 'test': 'test_data.csv'\n", "}\n", "\n", "dataset = load_dataset('csv', data_files=data_files)\n" ] }, { "cell_type": "code", "execution_count": 4, "id": "ec4be5eb-bac6-470d-b223-ff48fa6afa2f", "metadata": {}, "outputs": [], "source": [ "columns_to_keep = ['audio_path', 'sentence', 'audio_len', 'transcript_len']\n", "dataset = dataset.map(lambda batch: {k: batch[k] for k in columns_to_keep}, remove_columns=dataset['train'].column_names)\n" ] }, { "cell_type": "code", "execution_count": 5, "id": "f7d50095-672b-4e34-908b-400d8beed076", "metadata": {}, "outputs": [], "source": [ "dataset = dataset.rename_column('audio_path', 'audio')\n", "dataset = dataset.cast_column('audio', Audio(sampling_rate=16_000))" ] }, { "cell_type": "code", "execution_count": 6, "id": "0254701e-37d1-471f-9399-9e44eb5ca2ea", "metadata": {}, "outputs": [], "source": [ "train_dataset = dataset['train']\n", "eval_dataset = dataset['validation']\n", "test_dataset = dataset['test']\n" ] }, { "cell_type": "code", "execution_count": null, "id": "eef15786-00b0-4963-bd9b-ee668b62702c", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 7, "id": "efa4f50a-98de-4a64-a284-a17a6222737d", "metadata": {}, "outputs": [], "source": [ "def preprocess_text(batch):\n", " # 
Convert to lowercase\n", " batch['sentence'] = batch['sentence'].lower()\n", " \n", " # Normalize text\n", " batch['sentence'] = unicodedata.normalize('NFKC', batch['sentence'])\n", " batch['sentence'] = re.sub(r\"[\\’\\ʻ\\ʼ\\ʽ\\‘]\", \"'\", batch['sentence'])\n", " \n", " # Remove punctuation and special characters (except apostrophes)\n", " batch['sentence'] = re.sub(r\"[^\\w\\s']\", '', batch['sentence'])\n", " batch['sentence'] = re.sub(r'_', ' ', batch['sentence'])\n", " \n", " # Remove excessive whitespace\n", " batch['sentence'] = ' '.join(batch['sentence'].split())\n", " \n", " return batch\n", "\n", "\n", "train_dataset = train_dataset.map(preprocess_text)\n", "eval_dataset = eval_dataset.map(preprocess_text)\n", "test_dataset = test_dataset.map(preprocess_text)" ] }, { "cell_type": "code", "execution_count": 8, "id": "34d9a64b-0ba6-44df-a448-341fe2569311", "metadata": {}, "outputs": [], "source": [ "def get_len(batch):\n", " # Compute audio length if not already computed\n", " if batch.get('audio_len') is None or batch['audio_len'] == 0.0:\n", " audio = batch['audio']\n", " audio_len = librosa.get_duration(y=audio['array'], sr=audio['sampling_rate'])\n", " batch['audio_len'] = audio_len\n", " \n", " # Compute transcript length if not already computed\n", " if batch.get('transcript_len') is None or batch['transcript_len'] == 0:\n", " batch['transcript_len'] = len(batch['sentence'])\n", " \n", " # Compute length ratio\n", " batch['len_ratio'] = float(batch['audio_len']) / float(batch['transcript_len']) if batch['transcript_len'] > 0 else 0.0\n", " \n", " # Number of feature vectors (assuming 20ms frame shift)\n", " batch['num_feature_vecs'] = int(np.round(batch['audio_len'] * 1000 / 20))\n", " \n", " return batch\n", "\n", "\n", "train_dataset = train_dataset.map(get_len)\n", "eval_dataset = eval_dataset.map(get_len)\n", "test_dataset = test_dataset.map(get_len)\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "2a69b4ee-a441-4696-aded-5f5c71db8b22", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train dataset size: 3151 samples\n", "Validation dataset size: 647 samples\n", "Test dataset size: 648 samples\n", "Total training audio duration: 20.00 hours\n", "Total validation audio duration: 4.10 hours\n", "Total test audio duration: 4.06 hours\n" ] } ], "source": [ "print(f\"Train dataset size: {len(train_dataset)} samples\")\n", "print(f\"Validation dataset size: {len(eval_dataset)} samples\")\n", "print(f\"Test dataset size: {len(test_dataset)} samples\")\n", "\n", "print(f\"Total training audio duration: {sum(train_dataset['audio_len']) / 3600:.2f} hours\")\n", "print(f\"Total validation audio duration: {sum(eval_dataset['audio_len']) / 3600:.2f} hours\")\n", "print(f\"Total test audio duration: {sum(test_dataset['audio_len']) / 3600:.2f} hours\")\n" ] }, { "cell_type": "code", "execution_count": 10, "id": "cbc97db6-4e44-4aea-8e8d-8575785cdf84", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Alphabet: '169abcdefghijklmnopqrstuvwyzàŋ\n" ] } ], "source": [ "def extract_all_chars(batch):\n", " all_text = ' '.join(batch['sentence'])\n", " vocab = list(set(all_text))\n", " return {'vocab': [vocab]}\n", "\n", "vocab_list = train_dataset.map(\n", " extract_all_chars,\n", " batched=True,\n", " batch_size=-1,\n", " remove_columns=train_dataset.column_names\n", ")\n", "\n", "vocab_set = set()\n", "for vocab in vocab_list['vocab']:\n", " vocab_set.update(vocab)\n", "\n", "# Ensure space character is 
included\n", "if ' ' not in vocab_set:\n", " vocab_set.add(' ')\n", "\n", "alphabet = ''.join(sorted(vocab_set))\n", "print(f\"Alphabet: {alphabet}\")\n" ] }, { "cell_type": "code", "execution_count": 11, "id": "ac715ca5-b67c-48fd-910c-0cde68efe5b3", "metadata": {}, "outputs": [], "source": [ "vocab_dict = {char: idx for idx, char in enumerate(sorted(vocab_set))}\n", "\n", "# Replace space with word delimiter token '|'\n", "vocab_dict['|'] = vocab_dict[' ']\n", "del vocab_dict[' ']\n", "\n", "# Add special tokens\n", "vocab_dict['[UNK]'] = len(vocab_dict)\n", "vocab_dict['[PAD]'] = len(vocab_dict)\n", "\n", "# Save the vocabulary dictionary to a JSON file\n", "with open('vocab.json', 'w') as vocab_file:\n", " json.dump(vocab_dict, vocab_file)\n" ] }, { "cell_type": "code", "execution_count": 12, "id": "cc3d9d11-9e2d-4d5e-838a-f4ff0da2f6e9", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1617: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n", " warnings.warn(\n" ] } ], "source": [ "tokenizer = Wav2Vec2CTCTokenizer(\n", " 'vocab.json',\n", " unk_token='[UNK]',\n", " pad_token='[PAD]',\n", " word_delimiter_token='|'\n", ")\n", "\n", "feature_extractor = Wav2Vec2FeatureExtractor(\n", " feature_size=1,\n", " sampling_rate=16_000,\n", " padding_value=0.0,\n", " do_normalize=True,\n", " return_attention_mask=True\n", ")\n", "\n", "processor = Wav2Vec2Processor(\n", " feature_extractor=feature_extractor,\n", " tokenizer=tokenizer\n", ")\n" ] }, { "cell_type": "code", "execution_count": 13, "id": "4968b962-9074-48e8-a073-99cea7a914f9", "metadata": {}, "outputs": [], "source": [ "def prepare_dataset(batch):\n", " # Process audio\n", " audio = batch['audio']\n", " batch['input_values'] = processor(\n", " audio['array'],\n", " sampling_rate=audio['sampling_rate']\n", " ).input_values[0]\n", " \n", " # Process transcript\n", " batch['labels'] = processor(\n", " text=batch['sentence']\n", " ).input_ids\n", " return batch\n" ] }, { "cell_type": "code", "execution_count": 14, "id": "1247f52a-29d3-486b-9636-5065cade7a9a", "metadata": {}, "outputs": [], "source": [ "train_dataset = train_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n", "eval_dataset = eval_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n", "test_dataset = test_dataset.map(prepare_dataset, remove_columns=['audio', 'sentence'])\n" ] }, { "cell_type": "code", "execution_count": 15, "id": "298543b1-3795-4c03-8a4e-4802d0552f03", "metadata": {}, "outputs": [], "source": [ "@dataclass\n", "class DataCollatorCTCWithPadding:\n", " processor: Wav2Vec2Processor\n", " padding: Union[bool, str] = True\n", "\n", " def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n", " # Split inputs and labels\n", " input_features = [{'input_values': feature['input_values']} for feature in features]\n", " label_features = [{'input_ids': feature['labels']} for feature in features]\n", "\n", " # Pad inputs\n", " batch = self.processor.pad(\n", " input_features,\n", " padding=self.padding,\n", " return_tensors='pt'\n", " )\n", "\n", " # Pad labels\n", " labels_batch = self.processor.pad(\n", " labels=label_features,\n", " 
padding=self.padding,\n", " return_tensors='pt'\n", " )\n", "\n", " # Replace padding with -100 to ignore loss calculation for padding\n", " labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)\n", "\n", " batch['labels'] = labels\n", "\n", " return batch\n", "\n", "data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)\n" ] }, { "cell_type": "code", "execution_count": 16, "id": "711c51ad-266b-421e-80aa-0c631a72d4e2", "metadata": {}, "outputs": [], "source": [ "wer_metric = evaluate.load('wer')\n", "cer_metric = evaluate.load('cer')\n" ] }, { "cell_type": "code", "execution_count": 17, "id": "120b8c11-16e4-4cce-9dd3-fd31684c6959", "metadata": {}, "outputs": [], "source": [ "def compute_metrics(pred):\n", " pred_logits = pred.predictions\n", " pred_ids = np.argmax(pred_logits, axis=-1)\n", "\n", " # Replace -100 in labels as we can't decode them\n", " label_ids = pred.label_ids\n", " label_ids[label_ids == -100] = processor.tokenizer.pad_token_id\n", "\n", " # Decode predictions and references\n", " pred_str = processor.batch_decode(pred_ids)\n", " label_str = processor.batch_decode(label_ids, group_tokens=False)\n", "\n", " wer = wer_metric.compute(predictions=pred_str, references=label_str)\n", " cer = cer_metric.compute(predictions=pred_str, references=label_str)\n", "\n", " return {'wer': wer, 'cer': cer}\n" ] }, { "cell_type": "code", "execution_count": 18, "id": "a1fa4aed-40f8-451d-aea2-9dd200b2d215", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.10/dist-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()\n", " return self.fget.__get__(instance, owner)()\n", "Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-xls-r-300m and are newly initialized: ['lm_head.bias', 'lm_head.weight']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "model_checkpoint = \"facebook/wav2vec2-xls-r-300m\" # You can use a different checkpoint if desired\n", "\n", "model = Wav2Vec2ForCTC.from_pretrained(\n", " model_checkpoint,\n", " attention_dropout=0.1,\n", " hidden_dropout=0.1,\n", " feat_proj_dropout=0.0,\n", " mask_time_prob=0.05,\n", " layerdrop=0.1,\n", " ctc_loss_reduction='mean',\n", " pad_token_id=processor.tokenizer.pad_token_id,\n", " vocab_size=len(processor.tokenizer),\n", ")\n" ] }, { "cell_type": "code", "execution_count": 19, "id": "dc4e2945-ad0d-486b-a8a3-bb712ad9a63c", "metadata": {}, "outputs": [], "source": [ "model.freeze_feature_encoder()\n" ] }, { "cell_type": "code", "execution_count": 20, "id": "42fecc4a-9eaa-49a8-8b2a-1087dc50d04c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "20" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "round(sum(train_dataset['audio_len']) / 3600)" ] }, { "cell_type": "code", "execution_count": 21, "id": "d26ab602-0247-4e5c-8362-e01ea7de41b0", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Using wandb-core as the SDK backend. 
Please refer to https://wandb.me/wandb-core for more information.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33msulaiman-kagumire\u001b[0m (\u001b[33masr-africa-research-team\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" ] }, { "data": { "text/html": [ "Tracking run with wandb version 0.18.3" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Run data is saved locally in /workspace/wandb/run-20241006_174814-89bkm5a5" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "Syncing run wav2vec2-xls-r-300m-yogera-lg-20hrs-v1 to Weights & Biases (docs)
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View project at https://wandb.ai/asr-africa-research-team/ASR%20Africa" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ " View run at https://wandb.ai/asr-africa-research-team/ASR%20Africa/runs/89bkm5a5" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "" ], "text/plain": [ "" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Define your dataset and experiment details\n", "dataset = 'yogera' # Replace with actual dataset name\n", "language = 'lg' # Replace with the language you are working with\n", "sample_hours = round(sum(train_dataset['audio_len']) / 3600) # Calculate total training hours\n", "version = 'v1' # Version of your fine-tuned model\n", "batch_size = 8 # Adjust based on your GPU capacity\n", "grad_acc = 1\n", "eval_batch_size = batch_size // 2\n", "epochs = 100 # Adjust as needed\n", "output_dir = f\"{model_checkpoint.split('/')[-1]}-{dataset}-{language}-{sample_hours}hrs-{version}\"\n", "\n", "# Initialize WandB\n", "wandb.init(\n", " project=\"ASR Africa\",\n", " entity=\"asr-africa-research-team\",\n", " name=output_dir,\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "28e0779e-056a-46d9-8ff3-ce4411a8f005", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 31, "id": "33288226-5d7d-4a67-bee1-8fd3bc0b6896", "metadata": {}, "outputs": [], "source": [ "training_args = TrainingArguments(\n", " output_dir=output_dir,\n", " group_by_length=True,\n", " per_device_train_batch_size=batch_size,\n", " per_device_eval_batch_size=eval_batch_size,\n", " gradient_accumulation_steps=grad_acc,\n", " eval_strategy=\"epoch\",\n", " logging_strategy=\"epoch\",\n", " save_strategy=\"epoch\",\n", " num_train_epochs=epochs,\n", " gradient_checkpointing=True,\n", " fp16=True,\n", " learning_rate=1e-4,\n", " lr_scheduler_type='linear',\n", " warmup_ratio=0.1,\n", " save_total_limit=2,\n", " load_best_model_at_end=True,\n", " metric_for_best_model=\"wer\",\n", " greater_is_better=False,\n", " optim='adamw_torch',\n", " push_to_hub=True,\n", " hub_model_id=f\"asr-africa/{output_dir}\",\n", " hub_private_repo=True,\n", " dataloader_num_workers=num_dataloaders,\n", ")" ] }, { "cell_type": "code", "execution_count": 32, "id": "14ae6f20-47bc-4309-abb2-f5034828143b", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. 
It is recommended to upgrade the kernel to the minimum version or higher.\n" ] } ], "source": [ "trainer = Trainer(\n", " model=model,\n", " data_collator=data_collator,\n", " args=training_args,\n", " compute_metrics=compute_metrics,\n", " train_dataset=train_dataset,\n", " eval_dataset=eval_dataset,\n", " tokenizer=processor.feature_extractor,\n", " callbacks=[\n", " EarlyStoppingCallback(\n", " early_stopping_patience=10, # Stop if no improvement after 10 evaluation steps\n", " early_stopping_threshold=1e-3 # Stop if improvement is less than 0.001\n", " )\n", " ],\n", ")\n" ] }, { "cell_type": "code", "execution_count": 33, "id": "15b96745-c534-4638-84b3-2b75037bc81c", "metadata": { "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.\n" ] }, { "data": { "text/html": [ "\n", "
\n", " \n", " \n", " [ 561/39400 15:15 < 17:40:28, 0.61 it/s, Epoch 1.42/100]\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
EpochTraining LossValidation LossWerCer
113.1219004.3820241.0000001.000000

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[33], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:2043\u001b[0m, in \u001b[0;36mTrainer.train\u001b[0;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[1;32m 2040\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2041\u001b[0m \u001b[38;5;66;03m# Disable progress bars when uploading models during checkpoints to avoid polluting stdout\u001b[39;00m\n\u001b[1;32m 2042\u001b[0m hf_hub_utils\u001b[38;5;241m.\u001b[39mdisable_progress_bars()\n\u001b[0;32m-> 2043\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43minner_training_loop\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2044\u001b[0m \u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2045\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_from_checkpoint\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2046\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrial\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrial\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2047\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_keys_for_eval\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2048\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2049\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 2050\u001b[0m hf_hub_utils\u001b[38;5;241m.\u001b[39menable_progress_bars()\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:2388\u001b[0m, in \u001b[0;36mTrainer._inner_training_loop\u001b[0;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n\u001b[1;32m 2385\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcallback_handler\u001b[38;5;241m.\u001b[39mon_step_begin(args, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcontrol)\n\u001b[1;32m 2387\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39maccelerator\u001b[38;5;241m.\u001b[39maccumulate(model):\n\u001b[0;32m-> 2388\u001b[0m tr_loss_step \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtraining_step\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2390\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[1;32m 2391\u001b[0m args\u001b[38;5;241m.\u001b[39mlogging_nan_inf_filter\n\u001b[1;32m 2392\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_torch_xla_available()\n\u001b[1;32m 2393\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m 
(torch\u001b[38;5;241m.\u001b[39misnan(tr_loss_step) \u001b[38;5;129;01mor\u001b[39;00m torch\u001b[38;5;241m.\u001b[39misinf(tr_loss_step))\n\u001b[1;32m 2394\u001b[0m ):\n\u001b[1;32m 2395\u001b[0m \u001b[38;5;66;03m# if loss is nan or inf simply add the average of previous logged losses\u001b[39;00m\n\u001b[1;32m 2396\u001b[0m tr_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m tr_loss \u001b[38;5;241m/\u001b[39m (\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mglobal_step \u001b[38;5;241m-\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_globalstep_last_logged)\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/trainer.py:3518\u001b[0m, in \u001b[0;36mTrainer.training_step\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 3516\u001b[0m scaled_loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m 3517\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3518\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maccelerator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloss\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3520\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m loss\u001b[38;5;241m.\u001b[39mdetach() \u001b[38;5;241m/\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mgradient_accumulation_steps\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/accelerate/accelerator.py:2192\u001b[0m, in \u001b[0;36mAccelerator.backward\u001b[0;34m(self, loss, **kwargs)\u001b[0m\n\u001b[1;32m 2190\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[1;32m 2191\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mscaler \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m-> 2192\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscaler\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mscale\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloss\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2193\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m learning_rate \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhas_lomo_optimizer:\n\u001b[1;32m 2194\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlomo_backward(loss, learning_rate)\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 482\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 483\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m 484\u001b[0m Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m 485\u001b[0m 
(\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 490\u001b[0m inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m 491\u001b[0m )\n\u001b[0;32m--> 492\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 493\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m 494\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/autograd/__init__.py:251\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 246\u001b[0m retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m 248\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m 250\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 251\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m 252\u001b[0m \u001b[43m \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 253\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 254\u001b[0m \u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 255\u001b[0m \u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 256\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 257\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 258\u001b[0m \u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 259\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "trainer.train()" ] }, { "cell_type": "code", "execution_count": null, "id": "9fad91de-1d1a-4de9-9049-e8048a179746", "metadata": {}, "outputs": [], "source": [ "from huggingface_hub import HfApi, HfFolder\n", "\n", "# Your Hugging Face credentials\n", "repo_id = \"sulaimank/yog-transcribed\" # Replace with your actual Hugging Face repository\n", "zip_file = \"transcribed_wavs.zip\"\n", "\n", "# Initialize API and upload\n", "api = HfApi()\n", "api.upload_file(\n", " path_or_fileobj=zip_file,\n", " path_in_repo=zip_file, # Path where the file will be stored in the repo\n", " repo_id=repo_id,\n", " repo_type=\"dataset\", # This is a dataset repository\n", ")\n", "\n", "print(f\"Uploaded {zip_file} to 
Hugging Face at {repo_id}\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "e6e82f64-305a-476b-8bc2-563f628fb720", "metadata": {}, "outputs": [], 
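"source": [] }, { "cell_type": "code", "execution_count": null, "id": "3f6c2d1e-test-eval-sketch", "metadata": {}, "outputs": [], "source": [ "# A minimal sketch: score the fine-tuned model on the held-out test split prepared above.\n", "# Assumes `trainer`, `test_dataset`, and `compute_metrics` from the cells above;\n", "# `metric_key_prefix` only changes how the returned metric keys are named.\n", "test_results = trainer.evaluate(eval_dataset=test_dataset, metric_key_prefix='test')\n", "print(test_results)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "5a9d7b3c-empty", "metadata": {}, "outputs": [], 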
"source": [] }, { "cell_type": "code", "execution_count": null, "id": "8fe35b8a-0ecd-41b1-93e5-f677db733ab3", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "721fa523-789e-4a27-9a2e-1464193244fd", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "ef9b5c18-c264-4714-8b3d-5e015e5d89b2", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "feef3d76-cad8-4115-ba55-7dc5e99d638a", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "807cffe5-39f2-42ae-8135-98c7a86cd88f", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }