{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "54bbfa73-b27d-44a0-895c-81c4d7b3ed7e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: nbimporter in c:\\users\\wchl\\anaconda3\\envs\\speech_analysis\\lib\\site-packages (0.3.4)\n" ] } ], "source": [ "!pip install nbimporter" ] }, { "cell_type": "code", "execution_count": 2, "id": "79a3e8c9-559a-4356-9c39-c4abb09ca60d", "metadata": {}, "outputs": [ { "ename": "RuntimeError", "evalue": "Failed to import transformers.pipelines because of the following error (look up to see its traceback):\nDescriptors cannot be created directly.\nIf this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.\nIf you cannot immediately regenerate your protos, some other possible workarounds are:\n 1. Downgrade the protobuf package to 3.20.x or lower.\n 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).\n\nMore information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\utils\\import_utils.py:1390\u001b[0m, in \u001b[0;36m_LazyModule._get_module\u001b[1;34m(self, module_name)\u001b[0m\n\u001b[0;32m 1389\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1390\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mimportlib\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mimport_module\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m.\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mmodule_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;18;43m__name__\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1391\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\importlib\\__init__.py:127\u001b[0m, in \u001b[0;36mimport_module\u001b[1;34m(name, package)\u001b[0m\n\u001b[0;32m 126\u001b[0m level \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m--> 127\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_bootstrap\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_gcd_import\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m[\u001b[49m\u001b[43mlevel\u001b[49m\u001b[43m:\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpackage\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlevel\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[1;32m:1030\u001b[0m, in \u001b[0;36m_gcd_import\u001b[1;34m(name, package, level)\u001b[0m\n", "File \u001b[1;32m:1007\u001b[0m, in \u001b[0;36m_find_and_load\u001b[1;34m(name, import_)\u001b[0m\n", "File \u001b[1;32m:986\u001b[0m, in \u001b[0;36m_find_and_load_unlocked\u001b[1;34m(name, import_)\u001b[0m\n", "File \u001b[1;32m:680\u001b[0m, in \u001b[0;36m_load_unlocked\u001b[1;34m(spec)\u001b[0m\n", "File \u001b[1;32m:850\u001b[0m, in \u001b[0;36mexec_module\u001b[1;34m(self, module)\u001b[0m\n", "File \u001b[1;32m:228\u001b[0m, in \u001b[0;36m_call_with_frames_removed\u001b[1;34m(f, *args, **kwds)\u001b[0m\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\pipelines\\__init__.py:26\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mfeature_extraction_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m PreTrainedFeatureExtractor\n\u001b[1;32m---> 26\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimage_processing_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m BaseImageProcessor\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mmodels\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mauto\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mconfiguration_auto\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m AutoConfig\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\image_processing_utils.py:28\u001b[0m\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mfeature_extraction_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m BatchFeature \u001b[38;5;28;01mas\u001b[39;00m BaseBatchFeature\n\u001b[1;32m---> 28\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimage_transforms\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m center_crop, normalize, rescale\n\u001b[0;32m 29\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mimage_utils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChannelDimension\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\image_transforms.py:47\u001b[0m\n\u001b[0;32m 46\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_tf_available():\n\u001b[1;32m---> 47\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mtf\u001b[39;00m\n\u001b[0;32m 49\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_flax_available():\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\__init__.py:37\u001b[0m\n\u001b[0;32m 35\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01m_typing\u001b[39;00m\n\u001b[1;32m---> 37\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpython\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m module_util \u001b[38;5;28;01mas\u001b[39;00m _module_util\n\u001b[0;32m 38\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpython\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutil\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mlazy_loader\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m LazyLoader \u001b[38;5;28;01mas\u001b[39;00m _LazyLoader\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\python\\__init__.py:37\u001b[0m\n\u001b[0;32m 36\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpython\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pywrap_tensorflow \u001b[38;5;28;01mas\u001b[39;00m _pywrap_tensorflow\n\u001b[1;32m---> 37\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpython\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01meager\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m context\n\u001b[0;32m 39\u001b[0m \u001b[38;5;66;03m# pylint: enable=wildcard-import\u001b[39;00m\n\u001b[0;32m 40\u001b[0m \n\u001b[0;32m 41\u001b[0m \u001b[38;5;66;03m# Bring in subpackages.\u001b[39;00m\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\python\\eager\\context.py:29\u001b[0m\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01msix\u001b[39;00m\n\u001b[1;32m---> 29\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m function_pb2\n\u001b[0;32m 30\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mprotobuf\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m config_pb2\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\core\\framework\\function_pb2.py:16\u001b[0m\n\u001b[0;32m 13\u001b[0m _sym_db \u001b[38;5;241m=\u001b[39m _symbol_database\u001b[38;5;241m.\u001b[39mDefault()\n\u001b[1;32m---> 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m attr_value_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_attr__value__pb2\n\u001b[0;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m node_def_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_node__def__pb2\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\core\\framework\\attr_value_pb2.py:16\u001b[0m\n\u001b[0;32m 13\u001b[0m _sym_db \u001b[38;5;241m=\u001b[39m _symbol_database\u001b[38;5;241m.\u001b[39mDefault()\n\u001b[1;32m---> 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tensor_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_tensor__pb2\n\u001b[0;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tensor_shape_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\core\\framework\\tensor_pb2.py:16\u001b[0m\n\u001b[0;32m 13\u001b[0m _sym_db \u001b[38;5;241m=\u001b[39m _symbol_database\u001b[38;5;241m.\u001b[39mDefault()\n\u001b[1;32m---> 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m resource_handle_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_resource__handle__pb2\n\u001b[0;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tensor_shape_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\core\\framework\\resource_handle_pb2.py:16\u001b[0m\n\u001b[0;32m 13\u001b[0m _sym_db \u001b[38;5;241m=\u001b[39m _symbol_database\u001b[38;5;241m.\u001b[39mDefault()\n\u001b[1;32m---> 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m tensor_shape_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2\n\u001b[0;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtensorflow\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mframework\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m types_pb2 \u001b[38;5;28;01mas\u001b[39;00m tensorflow_dot_core_dot_framework_dot_types__pb2\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\tensorflow\\core\\framework\\tensor_shape_pb2.py:36\u001b[0m\n\u001b[0;32m 18\u001b[0m DESCRIPTOR \u001b[38;5;241m=\u001b[39m _descriptor\u001b[38;5;241m.\u001b[39mFileDescriptor(\n\u001b[0;32m 19\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtensorflow/core/framework/tensor_shape.proto\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m 20\u001b[0m package\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtensorflow\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 23\u001b[0m serialized_pb\u001b[38;5;241m=\u001b[39m_b(\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m,tensorflow/core/framework/tensor_shape.proto\u001b[39m\u001b[38;5;130;01m\\x12\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mtensorflow\u001b[39m\u001b[38;5;130;01m\\\"\u001b[39;00m\u001b[38;5;124mz\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x10\u001b[39;00m\u001b[38;5;124mTensorShapeProto\u001b[39m\u001b[38;5;130;01m\\x12\u001b[39;00m\u001b[38;5;124m-\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x03\u001b[39;00m\u001b[38;5;130;01m\\x64\u001b[39;00m\u001b[38;5;124mim\u001b[39m\u001b[38;5;130;01m\\x18\u001b[39;00m\u001b[38;5;130;01m\\x02\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\x03\u001b[39;00m\u001b[38;5;124m(\u001b[39m\u001b[38;5;130;01m\\x0b\u001b[39;00m\u001b[38;5;130;01m\\x32\u001b[39;00m\u001b[38;5;124m .tensorflow.TensorShapeProto.Dim\u001b[39m\u001b[38;5;130;01m\\x12\u001b[39;00m\u001b[38;5;130;01m\\x14\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x0c\u001b[39;00m\u001b[38;5;124munknown_rank\u001b[39m\u001b[38;5;130;01m\\x18\u001b[39;00m\u001b[38;5;130;01m\\x03\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;124m(\u001b[39m\u001b[38;5;130;01m\\x08\u001b[39;00m\u001b[38;5;130;01m\\x1a\u001b[39;00m\u001b[38;5;124m!\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x03\u001b[39;00m\u001b[38;5;130;01m\\x44\u001b[39;00m\u001b[38;5;124mim\u001b[39m\u001b[38;5;130;01m\\x12\u001b[39;00m\u001b[38;5;130;01m\\x0c\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x04\u001b[39;00m\u001b[38;5;124msize\u001b[39m\u001b[38;5;130;01m\\x18\u001b[39;00m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;124m(\u001b[39m\u001b[38;5;130;01m\\x03\u001b[39;00m\u001b[38;5;130;01m\\x12\u001b[39;00m\u001b[38;5;130;01m\\x0c\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x04\u001b[39;00m\u001b[38;5;124mname\u001b[39m\u001b[38;5;130;01m\\x18\u001b[39;00m\u001b[38;5;130;01m\\x02\u001b[39;00m\u001b[38;5;124m \u001b[39m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;124m(\u001b[39m\u001b[38;5;130;01m\\t\u001b[39;00m\u001b[38;5;124mB\u001b[39m\u001b[38;5;130;01m\\x87\u001b[39;00m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\x18\u001b[39;00m\u001b[38;5;124morg.tensorflow.frameworkB\u001b[39m\u001b[38;5;130;01m\\x11\u001b[39;00m\u001b[38;5;124mTensorShapeProtosP\u001b[39m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;124mZSgithub.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto\u001b[39m\u001b[38;5;130;01m\\xf8\u001b[39;00m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;130;01m\\x01\u001b[39;00m\u001b[38;5;130;01m\\x62\u001b[39;00m\u001b[38;5;130;01m\\x06\u001b[39;00m\u001b[38;5;124mproto3\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m 24\u001b[0m )\n\u001b[0;32m 29\u001b[0m _TENSORSHAPEPROTO_DIM \u001b[38;5;241m=\u001b[39m _descriptor\u001b[38;5;241m.\u001b[39mDescriptor(\n\u001b[0;32m 30\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mDim\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m 31\u001b[0m full_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtensorflow.TensorShapeProto.Dim\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m 32\u001b[0m filename\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 33\u001b[0m file\u001b[38;5;241m=\u001b[39mDESCRIPTOR,\n\u001b[0;32m 34\u001b[0m containing_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 35\u001b[0m fields\u001b[38;5;241m=\u001b[39m[\n\u001b[1;32m---> 36\u001b[0m \u001b[43m_descriptor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mFieldDescriptor\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 37\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43msize\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfull_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mtensorflow.TensorShapeProto.Dim.size\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mindex\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 38\u001b[0m \u001b[43m \u001b[49m\u001b[43mnumber\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mtype\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m3\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcpp_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m2\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlabel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 39\u001b[0m \u001b[43m \u001b[49m\u001b[43mhas_default_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdefault_value\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m 40\u001b[0m \u001b[43m \u001b[49m\u001b[43mmessage_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43menum_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcontaining_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 41\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_extension\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mextension_scope\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 42\u001b[0m \u001b[43m \u001b[49m\u001b[43mserialized_options\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfile\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mDESCRIPTOR\u001b[49m\u001b[43m)\u001b[49m,\n\u001b[0;32m 43\u001b[0m _descriptor\u001b[38;5;241m.\u001b[39mFieldDescriptor(\n\u001b[0;32m 44\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m'\u001b[39m, full_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtensorflow.TensorShapeProto.Dim.name\u001b[39m\u001b[38;5;124m'\u001b[39m, index\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m,\n\u001b[0;32m 45\u001b[0m number\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m2\u001b[39m, \u001b[38;5;28mtype\u001b[39m\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m9\u001b[39m, cpp_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m9\u001b[39m, label\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m,\n\u001b[0;32m 46\u001b[0m has_default_value\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m, default_value\u001b[38;5;241m=\u001b[39m_b(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m)\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m'\u001b[39m),\n\u001b[0;32m 47\u001b[0m message_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, enum_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, containing_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 48\u001b[0m is_extension\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m, extension_scope\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 49\u001b[0m serialized_options\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, file\u001b[38;5;241m=\u001b[39mDESCRIPTOR),\n\u001b[0;32m 50\u001b[0m ],\n\u001b[0;32m 51\u001b[0m extensions\u001b[38;5;241m=\u001b[39m[\n\u001b[0;32m 52\u001b[0m ],\n\u001b[0;32m 53\u001b[0m nested_types\u001b[38;5;241m=\u001b[39m[],\n\u001b[0;32m 54\u001b[0m enum_types\u001b[38;5;241m=\u001b[39m[\n\u001b[0;32m 55\u001b[0m ],\n\u001b[0;32m 56\u001b[0m serialized_options\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 57\u001b[0m is_extendable\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m 58\u001b[0m syntax\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mproto3\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m 59\u001b[0m extension_ranges\u001b[38;5;241m=\u001b[39m[],\n\u001b[0;32m 60\u001b[0m oneofs\u001b[38;5;241m=\u001b[39m[\n\u001b[0;32m 61\u001b[0m ],\n\u001b[0;32m 62\u001b[0m serialized_start\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m149\u001b[39m,\n\u001b[0;32m 63\u001b[0m serialized_end\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m182\u001b[39m,\n\u001b[0;32m 64\u001b[0m )\n\u001b[0;32m 66\u001b[0m _TENSORSHAPEPROTO \u001b[38;5;241m=\u001b[39m _descriptor\u001b[38;5;241m.\u001b[39mDescriptor(\n\u001b[0;32m 67\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTensorShapeProto\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[0;32m 68\u001b[0m full_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtensorflow.TensorShapeProto\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 100\u001b[0m serialized_end\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m182\u001b[39m,\n\u001b[0;32m 101\u001b[0m )\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\google\\protobuf\\descriptor.py:621\u001b[0m, in \u001b[0;36mFieldDescriptor.__new__\u001b[1;34m(cls, name, full_name, index, number, type, cpp_type, label, default_value, message_type, enum_type, containing_type, is_extension, extension_scope, options, serialized_options, has_default_value, containing_oneof, json_name, file, create_key)\u001b[0m\n\u001b[0;32m 615\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__new__\u001b[39m(\u001b[38;5;28mcls\u001b[39m, name, full_name, index, number, \u001b[38;5;28mtype\u001b[39m, cpp_type, label,\n\u001b[0;32m 616\u001b[0m default_value, message_type, enum_type, containing_type,\n\u001b[0;32m 617\u001b[0m is_extension, extension_scope, options\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 618\u001b[0m serialized_options\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 619\u001b[0m has_default_value\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, containing_oneof\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, json_name\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 620\u001b[0m file\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, create_key\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m): \u001b[38;5;66;03m# pylint: disable=redefined-builtin\u001b[39;00m\n\u001b[1;32m--> 621\u001b[0m \u001b[43m_message\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mMessage\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_CheckCalledFromGeneratedFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 622\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_extension:\n", "\u001b[1;31mTypeError\u001b[0m: Descriptors cannot be created directly.\nIf this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.\nIf you cannot immediately regenerate your protos, some other possible workarounds are:\n 1. Downgrade the protobuf package to 3.20.x or lower.\n 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).\n\nMore information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates", "\nThe above exception was the direct cause of the following exception:\n", "\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)", "Cell \u001b[1;32mIn[2], line 8\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mre\u001b[39;00m\n\u001b[0;32m 7\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorchaudio\u001b[39;00m\n\u001b[1;32m----> 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformers\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pipeline\n\u001b[0;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtext2int\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m text_to_int\n\u001b[0;32m 10\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01misNumber\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m is_number\n", "File \u001b[1;32m:1055\u001b[0m, in \u001b[0;36m_handle_fromlist\u001b[1;34m(module, fromlist, import_, recursive)\u001b[0m\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\utils\\import_utils.py:1380\u001b[0m, in \u001b[0;36m_LazyModule.__getattr__\u001b[1;34m(self, name)\u001b[0m\n\u001b[0;32m 1378\u001b[0m value \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_module(name)\n\u001b[0;32m 1379\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_class_to_module\u001b[38;5;241m.\u001b[39mkeys():\n\u001b[1;32m-> 1380\u001b[0m module \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_module\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_class_to_module\u001b[49m\u001b[43m[\u001b[49m\u001b[43mname\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1381\u001b[0m value \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mgetattr\u001b[39m(module, name)\n\u001b[0;32m 1382\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n", "File \u001b[1;32m~\\anaconda3\\envs\\Speech_Analysis\\lib\\site-packages\\transformers\\utils\\import_utils.py:1392\u001b[0m, in \u001b[0;36m_LazyModule._get_module\u001b[1;34m(self, module_name)\u001b[0m\n\u001b[0;32m 1390\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m importlib\u001b[38;5;241m.\u001b[39mimport_module(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m module_name, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m)\n\u001b[0;32m 1391\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m-> 1392\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[0;32m 1393\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to import \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodule_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m because of the following error (look up to see its\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1394\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m traceback):\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1395\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n", "\u001b[1;31mRuntimeError\u001b[0m: Failed to import transformers.pipelines because of the following error (look up to see its traceback):\nDescriptors cannot be created directly.\nIf this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.\nIf you cannot immediately regenerate your protos, some other possible workarounds are:\n 1. Downgrade the protobuf package to 3.20.x or lower.\n 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).\n\nMore information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates" ] } ], "source": [ "# Import necessary libraries and filter warnings\n", "import warnings\n", "warnings.filterwarnings(\"ignore\")\n", "import nbimporter\n", "import os\n", "import re\n", "import torchaudio\n", "from transformers import pipeline\n", "from text2int import text_to_int\n", "from isNumber import is_number\n", "from Text2List import text_to_list\n", "from convert2list import convert_to_list\n", "from processDoubles import process_doubles\n", "from replaceWords import replace_words\n", "pipe = pipeline(task=\"automatic-speech-recognition\", model=\"cdactvm/w2v-bert-2.0-hindi_v1\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "6075c345-ee6a-4810-b02f-88d517d272e2", "metadata": {}, "outputs": [], "source": [ "# # Process the audio file\n", "transcript = pipe(\"C:/Users/WCHL/Desktop/hindi_dataset/train/hindi_numbers_test/hindi7.mp3\")\n", "text_value = transcript['text']\n", "processd_doubles=process_doubles(text_value)\n", "converted_to_list=convert_to_list(processd_doubles,text_to_list())\n", "replaced_words = replace_words(converted_to_list)\n", "converted_text=text_to_int(replaced_words)\n", "print(f\"generated text : {text_value}\")\n", "print(f\"processed doubles : {processd_doubles}\")\n", "print(f\"converted to list : {converted_to_list}\")\n", "print(f\"replaced words : {replaced_words}\")\n", "print(f\"final text : {converted_text}\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "45040aca-7564-4b07-8e13-1fde7f4f8da2", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.18" } }, "nbformat": 4, "nbformat_minor": 5 }