{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "dvidSKA14fhf" }, "source": [ "##Установка необходимых библиотек" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "YImwMQLjASiK", "outputId": "1177bd7c-e220-4f5f-e75f-24c2a7556604" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", "To disable this warning, you can either:\n", "\t- Avoid using `tokenizers` before the fork if possible\n", "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.2\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" ] } ], "source": [ "!pip install faiss-cpu sentence-transformers langchain langchain-community anthropic youtube-transcript-api -q\n" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n", "To disable this warning, you can either:\n", "\t- Avoid using `tokenizers` before the fork if possible\n", "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: google-api-python-client in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (2.144.0)\n", "Requirement already satisfied: httplib2<1.dev0,>=0.19.0 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-python-client) (0.22.0)\n", "Requirement already satisfied: google-auth!=2.24.0,!=2.25.0,<3.0.0.dev0,>=1.32.0 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-python-client) (2.34.0)\n", "Requirement already satisfied: google-auth-httplib2<1.0.0,>=0.2.0 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-python-client) (0.2.0)\n", "Requirement already satisfied: google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-python-client) (2.19.2)\n", "Requirement already satisfied: uritemplate<5,>=3.0.1 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-python-client) (4.1.1)\n", "Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (1.65.0)\n", "Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0.dev0,>=3.19.5 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from 
google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (5.28.0)\n", "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.3 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (1.24.0)\n", "Requirement already satisfied: requests<3.0.0.dev0,>=2.18.0 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (2.32.3)\n", "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-auth!=2.24.0,!=2.25.0,<3.0.0.dev0,>=1.32.0->google-api-python-client) (5.5.0)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-auth!=2.24.0,!=2.25.0,<3.0.0.dev0,>=1.32.0->google-api-python-client) (0.4.0)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from google-auth!=2.24.0,!=2.25.0,<3.0.0.dev0,>=1.32.0->google-api-python-client) (4.9)\n", "Requirement already satisfied: pyparsing!=3.0.0,!=3.0.1,!=3.0.2,!=3.0.3,<4,>=2.4.2 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from httplib2<1.dev0,>=0.19.0->google-api-python-client) (3.1.4)\n", "Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from pyasn1-modules>=0.2.1->google-auth!=2.24.0,!=2.25.0,<3.0.0.dev0,>=1.32.0->google-api-python-client) (0.6.0)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (3.3.2)\n", "Requirement already satisfied: idna<4,>=2.5 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (3.8)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (2.2.2)\n", "Requirement already satisfied: certifi>=2017.4.17 in /Users/HUAWEI/Coding/0Project-summarizer/myenv/lib/python3.11/site-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0,<3.0.0.dev0,>=1.31.5->google-api-python-client) (2024.8.30)\n", "\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.2\u001b[0m\n", "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } ], "source": [ "pip install --upgrade google-api-python-client" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "nL9Vg7lgA8nt" }, "outputs": [], "source": [ "# from langchain_community.embeddings 
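{ "cell_type": "markdown", "metadata": {}, "source": [ "The cells below use placeholder strings for the YouTube Data API and Anthropic API keys. As an optional alternative, the keys can be read from environment variables; a minimal sketch (the variable names `YOUTUBE_API_KEY` and `ANTHROPIC_API_KEY` are placeholders and are not referenced by the rest of the notebook):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional sketch: read the API keys from environment variables instead of hard-coding them.\n", "# The environment variable names are placeholders; adjust them to your setup.\n", "import os\n", "\n", "youtube_api_key = os.getenv(\"YOUTUBE_API_KEY\", \"Youtube_api\")\n", "anthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\", \"Your_api_key\")" ] },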
{ "cell_type": "markdown", "metadata": { "id": "Iav_VBRN4xr2" }, "source": [ "## Creating transcripts of the source playlists using the YouTube API\n" ] },
{ "cell_type": "code", "execution_count": 26, "metadata": { "id": "sFYWNeL-4xIr" }, "outputs": [], "source": [ "from youtube_transcript_api import YouTubeTranscriptApi\n", "from googleapiclient.discovery import build\n", "\n", "api_key = \"Youtube_api\"\n", "\n", "\n", "def get_playlist_video_ids(playlist_id, api_key):\n", "    youtube = build('youtube', 'v3', developerKey=api_key)\n", "\n", "    video_ids = []\n", "    next_page_token = None\n", "\n", "    while True:\n", "        # Fetch one page of the playlist's videos\n", "        request = youtube.playlistItems().list(\n", "            part=\"contentDetails\",\n", "            playlistId=playlist_id,\n", "            maxResults=50,  # maximum number of items the API returns per request\n", "            pageToken=next_page_token\n", "        )\n", "        response = request.execute()\n", "\n", "        # Collect the video IDs\n", "        video_ids.extend([item['contentDetails']['videoId'] for item in response['items']])\n", "\n", "        # pagination\n", "        next_page_token = response.get('nextPageToken')\n", "\n", "        if not next_page_token:\n", "            break\n", "\n", "    return video_ids\n" ] },
{ "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [], "source": [ "# Sanity check: inspect one raw playlistItems response and its pagination token\n", "youtube = build('youtube', 'v3', developerKey=api_key)\n", "request = youtube.playlistItems().list(\n", "        part=\"contentDetails\",\n", "        playlistId='PLYSHtNPbAINnbqXjIbN-c7DorjCT6eYOQ',\n", "        maxResults=50,  # maximum number of items the API returns per request\n", "        # pageToken=response.get('nextPageToken')\n", "    )\n", "result1 = request.execute()\n", "result_pagetoken = result1.get('nextPageToken')\n", "result1\n", "# result_pagetoken is None here: the playlist fits on a single page\n" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### EN transcripts all" ] },
{ "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [], "source": [ "\n", "def get_transcript_en(video_id, language_code='en'):\n", "    try:\n", "        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[language_code])\n", "        transcript_text = \" \".join([entry['text'] for entry in transcript])\n", "        return transcript_text\n", "    except Exception as e:\n", "        # On failure the error message is returned in place of the transcript\n", "        return str(e)\n", "\n", "def get_video_details(video_id, api_key):\n", "    youtube = build('youtube', 'v3', developerKey=api_key)\n", "\n", "    # Fetch the video metadata\n", "    request = youtube.videos().list(\n", "        part=\"snippet\",\n", "        id=video_id\n", "    )\n", "    response = request.execute()\n", "\n", "    if 'items' in response and len(response['items']) > 0:\n", "        return response['items'][0]['snippet']['title']\n", "    else:\n", "        return None\n", "\n", "def get_playlist_transcripts_en(playlist_url, api_key, language_code='en'):\n", "    # Extract the playlist_id from the URL\n", "    playlist_id = playlist_url.split(\"list=\")[-1]\n", "\n", "    # Get all video IDs in the playlist\n", "    video_ids = get_playlist_video_ids(playlist_id, api_key)\n", "\n", "    transcripts = []\n", "\n", "    # Fetch the title and transcript for every video\n", "    for video_id in video_ids:\n", "        video_title = get_video_details(video_id, api_key)\n", "        transcript = get_transcript_en(video_id, language_code)\n", "        transcripts.append({'title': video_title, 'transcript': transcript})\n", "\n", "    return transcripts\n" ] },
{ "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [], "source": [ "# Sources:\n", "playlist_ml_en = \"https://www.youtube.com/watch?v=Gv9_4yMHFhI&list=PLblh5JKOoLUICTaGLRoHQDuF_7q2GfuJF\"\n", "playlist_logistic_en = \"https://www.youtube.com/watch?v=yIYKR4sgzI8&list=PLblh5JKOoLUKxzEP5HA2d-Li7IJkHfXSe\"\n", "playlist_nn_en = \"https://www.youtube.com/watch?v=zxagGtF9MeU&list=PLblh5JKOoLUIxGDQs4LFFD--41Vzf-ME1\"\n", "playlist_stat_en = \"https://www.youtube.com/watch?v=qBigTkBLU6g&list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9\"\n", "playlist_nn2_en = \"https://www.youtube.com/playlist?list=PLZHQObOWTQDNU6R1_67000Dx_ZCJB-3pi\"\n", "playlist_linal2_en = \"https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab\"\n" ] },
{ "cell_type": "code", "execution_count": 31, "metadata": { "id": "QYmaFtk_5O6C" }, "outputs": [], "source": [ "transcripts_ML_en = get_playlist_transcripts_en(playlist_ml_en, api_key, 'en')\n", "# ~2 min 8 s runtime" ] },
{ "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [], "source": [ "# tier 2\n", "transcripts_logistic_en = get_playlist_transcripts_en(playlist_logistic_en, api_key, 'en')\n", "transcripts_NN_en = get_playlist_transcripts_en(playlist_nn_en, api_key, 'en')\n", "transcripts_stat_en = get_playlist_transcripts_en(playlist_stat_en, api_key, 'en')" ] },
{ "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [], "source": [ "# tier 3\n", "transcripts_nn2_en = get_playlist_transcripts_en(playlist_nn2_en, api_key, 'en')\n", "transcripts_linal2_en = get_playlist_transcripts_en(playlist_linal2_en, api_key, 'en')" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### RU transcripts all" ] },
{ "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [], "source": [ "# Same helpers as the EN versions, but with Russian as the default transcript language\n", "def get_transcript_ru(video_id, language_code='ru'):\n", "    try:\n", "        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[language_code])\n", "        transcript_text = \" \".join([entry['text'] for entry in transcript])\n", "        return transcript_text\n", "    except Exception as e:\n", "        return str(e)\n", "\n", "def get_video_details(video_id, api_key):\n", "    youtube = build('youtube', 'v3', developerKey=api_key)\n", "\n", "    # Fetch the video metadata\n", "    request = youtube.videos().list(\n", "        part=\"snippet\",\n", "        id=video_id\n", "    )\n", "    response = request.execute()\n", "\n", "    if 'items' in response and len(response['items']) > 0:\n", "        return response['items'][0]['snippet']['title']\n", "    else:\n", "        return None\n", "\n", "def get_playlist_transcripts_ru(playlist_url, api_key, language_code='ru'):\n", "    # Extract the playlist_id from the URL\n", "    playlist_id = playlist_url.split(\"list=\")[-1]\n", "\n", "    # Get all video IDs in the playlist\n", "    video_ids = get_playlist_video_ids(playlist_id, api_key)\n", "\n", "    transcripts = []\n", "\n", "    # Fetch the title and transcript for every video\n", "    for video_id in video_ids:\n", "        video_title = get_video_details(video_id, api_key)\n", "        transcript = get_transcript_ru(video_id, language_code)\n", "        transcripts.append({'title': video_title, 'transcript': transcript})\n", "\n", "    return transcripts\n" ] },
{ "cell_type": "code", "execution_count": 40, "metadata": {}, "outputs": [], "source": [ "# Sources: Elbrus Bootcamp\n", "playlist_phase_1_url = \"https://www.youtube.com/playlist?list=PLYSHtNPbAINnbqXjIbN-c7DorjCT6eYOQ\"\n", "playlist_phase_2_url = 'https://www.youtube.com/playlist?list=PLYSHtNPbAINnNvDXtGNmC7-F1QRH7qTgb'\n", "playlist_phase_3_url = 'https://www.youtube.com/playlist?list=PLYSHtNPbAINlmyNNmTaqcn3BsaY8v1xgV'\n", "\n", "# Sources outside the bootcamp:\n", "playlist_NN_ru = 'https://www.youtube.com/playlist?list=PL0Ks75aof3Tiru-UvOvYmXzD1tU0NrR8V'\n", "playlist_OOP_ru = 'https://www.youtube.com/watch?v=Z7AY41tE-3U&list=PLA0M1Bcd0w8zPwP7t-FgwONhZOHt9rz9E'\n", "playlist_linal_ru = 'https://youtube.com/playlist?list=PLAQWsvWQlb6cIRY6yJtYnXCbxLxPZv6-Z'\n", "playlist_docker_ru = 'https://www.youtube.com/watch?v=jVV8CVURmrE&list=PLqVeG_R3qMSwjnkMUns_Yc4zF_PtUZmB-'\n" ] },
{ "cell_type": "code", "execution_count": 38, "metadata": { "id": "SojIu0NP5VNt" }, "outputs": [], "source": [ "# Elbrus\n", "transcripts_phase_1 = get_playlist_transcripts_ru(playlist_phase_1_url, api_key, 'ru')\n", "transcripts_phase_2 = get_playlist_transcripts_ru(playlist_phase_2_url, api_key, 'ru')\n", "transcripts_phase_3 = get_playlist_transcripts_ru(playlist_phase_3_url, api_key, 'ru')" ] },
{ "cell_type": "code", "execution_count": 41, "metadata": {}, "outputs": [], "source": [ "# other RU playlists\n", "transcripts_NN_ru = get_playlist_transcripts_ru(playlist_NN_ru, api_key, 'ru')\n", "transcripts_OOP_ru = get_playlist_transcripts_ru(playlist_OOP_ru, api_key, 'ru')\n", "transcripts_linal_ru = get_playlist_transcripts_ru(playlist_linal_ru, api_key, 'ru')\n", "transcripts_docker_ru = get_playlist_transcripts_ru(playlist_docker_ru, api_key, 'ru')\n", "\n", "# ~3 min 12 s runtime" ] },
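{ "cell_type": "markdown", "metadata": {}, "source": [ "The EN and RU helpers above differ only in their default language code. For reference, a possible consolidated version is sketched below; it is not used by the cells that follow, which keep the per-language functions." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Sketch only (not used below): a single parametrized fetcher instead of the\n", "# get_playlist_transcripts_en / get_playlist_transcripts_ru pair.\n", "def get_playlist_transcripts(playlist_url, api_key, language_code):\n", "    playlist_id = playlist_url.split(\"list=\")[-1]\n", "    video_ids = get_playlist_video_ids(playlist_id, api_key)\n", "\n", "    transcripts = []\n", "    for video_id in video_ids:\n", "        video_title = get_video_details(video_id, api_key)\n", "        try:\n", "            entries = YouTubeTranscriptApi.get_transcript(video_id, languages=[language_code])\n", "            transcript = \" \".join(entry['text'] for entry in entries)\n", "        except Exception as e:\n", "            transcript = str(e)\n", "        transcripts.append({'title': video_title, 'transcript': transcript})\n", "    return transcripts" ] },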
{}, "outputs": [], "source": [ "transcripts_all = [transcripts_phase_1, transcripts_phase_2, transcripts_phase_3, transcripts_NN_ru, transcripts_OOP_ru, transcripts_linal_ru, transcripts_docker_ru, \\\n", " transcripts_ML_en, transcripts_logistic_en, transcripts_NN_en, transcripts_stat_en, transcripts_nn2_en, transcripts_linal2_en]\n" ] }, { "cell_type": "markdown", "metadata": { "id": "x08xCtkk5ocW" }, "source": [ "## Нарезаем все транскрипты на фрагменты с overlap(нахлест), преобразуем каждый фрагмент в вектор и все вектора записываем в векторное хранилище FAISS" ] }, { "cell_type": "code", "execution_count": 55, "metadata": { "id": "gbbajjDK5niN" }, "outputs": [], "source": [ "from langchain_core.documents import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain.vectorstores.faiss import FAISS\n", "from sentence_transformers import SentenceTransformer\n", "from langchain.embeddings import HuggingFaceEmbeddings\n", "\n", "# Convert data to Document objects\n", "docs = []\n", "for playlist in transcripts_all:\n", " for item in playlist:\n", " for title, transcript in item.items():\n", " docs.append(Document(page_content=transcript, metadata={\"title\": title}))\n", "\n", "# Split documents into chunks\n", "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)\n", "split_docs = text_splitter.split_documents(docs)\n", "\n", "# Setup the new embeddings model\n", "model_name = \"intfloat/multilingual-e5-base\"\n", "embeddings = HuggingFaceEmbeddings(model_name=model_name)\n", "\n", "# Create the FAISS vector store and save it locally\n", "vector_store = FAISS.from_documents(split_docs, embedding=embeddings)\n", "vector_store.save_local(\"faiss_index\")\n", "\n", "# Load the FAISS vector store from local storage\n", "vector_store = FAISS.load_local('faiss_index', embeddings=embeddings, allow_dangerous_deserialization=True)\n", "\n", "# Create the retriever for document retrieval\n", "embedding_retriever = vector_store.as_retriever(search_kwargs={\"k\": 15})" ] }, { "cell_type": "markdown", "metadata": { "id": "Y8ZBeJM98Ay6" }, "source": [ "## Query and answer\n" ] }, { "cell_type": "code", "execution_count": 59, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 390 }, "id": "pg9NqxCRoLPn", "outputId": "d7695e09-4171-40c6-b055-08bba5b6e487" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Loading existing FAISS index...\n", "Информация из базы знаний:\n", "\n", "Шаги логистической регрессии:\n", "\n", "1. Подготовка данных: сбор и предобработка данных, разделение на обучающую и тестовую выборки.\n", "\n", "2. Выбор функции активации: обычно используется сигмоидная функция.\n", "\n", "3. Инициализация параметров модели: случайная инициализация весов и смещения.\n", "\n", "4. Определение функции потерь: чаще всего используется кросс-энтропия.\n", "\n", "5. Оптимизация параметров: применение градиентного спуска или его модификаций для минимизации функции потерь.\n", "\n", "6. Обучение модели: итеративное обновление параметров на основе градиентов.\n", "\n", "7. Оценка модели: проверка точности на тестовой выборке.\n", "\n", "8. Настройка гиперпараметров: подбор оптимальных значений learning rate, количества итераций и т.д.\n", "\n", "Что полезно добавить поверх базы знаний:\n", "\n", "9. Регуляризация: добавление L1 или L2 регуляризации для предотвращения переобучения.\n", "\n", "10. 
{ "cell_type": "markdown", "metadata": { "id": "Y8ZBeJM98Ay6" }, "source": [ "## Query and answer\n" ] },
{ "cell_type": "code", "execution_count": 59, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 390 }, "id": "pg9NqxCRoLPn", "outputId": "d7695e09-4171-40c6-b055-08bba5b6e487" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Loading existing FAISS index...\n", "Информация из базы знаний:\n", "\n", "Шаги логистической регрессии:\n", "\n", "1. Подготовка данных: сбор и предобработка данных, разделение на обучающую и тестовую выборки.\n", "\n", "2. Выбор функции активации: обычно используется сигмоидная функция.\n", "\n", "3. Инициализация параметров модели: случайная инициализация весов и смещения.\n", "\n", "4. Определение функции потерь: чаще всего используется кросс-энтропия.\n", "\n", "5. Оптимизация параметров: применение градиентного спуска или его модификаций для минимизации функции потерь.\n", "\n", "6. Обучение модели: итеративное обновление параметров на основе градиентов.\n", "\n", "7. Оценка модели: проверка точности на тестовой выборке.\n", "\n", "8. Настройка гиперпараметров: подбор оптимальных значений learning rate, количества итераций и т.д.\n", "\n", "Что полезно добавить поверх базы знаний:\n", "\n", "9. Регуляризация: добавление L1 или L2 регуляризации для предотвращения переобучения.\n", "\n", "10. Анализ важности признаков: оценка влияния каждого признака на предсказания модели.\n", "\n", "11. Обработка несбалансированных данных: применение техник, таких как взвешивание классов или oversampling/undersampling.\n", "\n", "12. Интерпретация результатов: анализ коэффициентов модели для понимания влияния признаков.\n", "\n", "13. Кросс-валидация: использование k-fold кросс-валидации для более надежной оценки производительности модели.\n", "\n", "14. Мониторинг процесса обучения: отслеживание изменения функции потерь и точности на валидационной выборке для определения момента остановки обучения.\n" ] } ], "source": [ "import anthropic\n", "from langchain_core.documents import Document\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "from langchain_community.vectorstores import FAISS\n", "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "import os\n", "\n", "# Anthropic API setup\n", "client = anthropic.Client(api_key='Your_api_key')\n", "\n", "# Prompt template\n", "prompt_template = '''Reply to the {input} as a seasoned machine learning professional. \\\n", "If the topic is outside of machine learning and data science, please respond with \"Seek help from a professional.\" It is very important to abide by this; you will be penalized if you cover topics outside of data science and machine learning. \\\n", "Use only the Context. If the context provides only partial info, then split the reply in two parts. Part 1 is called \"Information from knowledge base\" (for a Russian reply, rename it to Информация из базы знаний); write the ideas as close to the initial text as possible, editing for brevity and language errors. \\\n", "Part 2 is called \"What I would add\" (for a Russian reply, rename it to Что полезно добавить поверх базы знаний); in the second part add your own reply. \\\n", "Reply in the language of {input}. \\\n", "It is critical not to preface the reply with, for example, \"Here is a response\" or \"thank you\". Start with the reply itself.\\\n",
"Context: {context}'''\n", "\n", "# RAG setup\n", "def setup_rag(force_rebuild=False):\n", "    model_name = \"intfloat/multilingual-e5-base\"\n", "    embeddings = HuggingFaceEmbeddings(model_name=model_name)\n", "\n", "    if not force_rebuild and os.path.exists(\"faiss_index\"):\n", "        print(\"Loading existing FAISS index...\")\n", "        return FAISS.load_local('faiss_index', embeddings=embeddings, allow_dangerous_deserialization=True), embeddings\n", "\n", "    print(\"Building new FAISS index...\")\n", "    # Convert the data to Document objects (one Document per video, with the title as metadata)\n", "    docs = []\n", "    for playlist in transcripts_all:\n", "        for item in playlist:\n", "            docs.append(Document(page_content=item['transcript'], metadata={\"title\": item['title']}))\n", "\n", "    # Split documents into overlapping chunks\n", "    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)\n", "    split_docs = text_splitter.split_documents(docs)\n", "\n", "    # Create the FAISS vector store and save it locally\n", "    vector_store = FAISS.from_documents(split_docs, embedding=embeddings)\n", "    vector_store.save_local(\"faiss_index\")\n", "\n", "    return vector_store, embeddings\n", "\n", "# API call to Claude\n", "def call_claude_api(prompt, client):\n", "    response = client.messages.create(\n", "        model=\"claude-3-5-sonnet-20240620\",\n", "        messages=[\n", "            {\"role\": \"user\", \"content\": prompt}\n", "        ],\n", "        max_tokens=2000,\n", "        temperature=0.1\n", "    )\n", "    return response.content[0].text\n", "\n", "# Answer a question using retrieved context\n", "def answer_question(question, retriever, client):\n", "    documents = retriever.get_relevant_documents(question)\n", "    context = \" \".join([doc.page_content for doc in documents])\n", "    prompt = prompt_template.format(context=context, input=question)\n", "    return call_claude_api(prompt, client)\n", "\n", "# Main execution\n", "if __name__ == \"__main__\":\n", "    # Set up RAG (loads the existing index if available)\n", "    vector_store, embeddings = setup_rag()\n", "\n", "    # Create the retriever for document retrieval\n", "    embedding_retriever = vector_store.as_retriever(search_kwargs={\"k\": 15})\n", "\n", "    # Example usage\n", "    question = 'Шаги логистической регрессии'\n", "    answer = answer_question(question, embedding_retriever, client)\n", "    print(answer)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "colab": { "provenance": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.7" } }, "nbformat": 4, "nbformat_minor": 0 }