{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "id": "2eSvM9zX_2d3" }, "outputs": [], "source": [ "%%capture\n", "# Installs Unsloth, Xformers (Flash Attention) and all other packages!\n", "!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\"\n", "\n", "# We have to check which Torch version for Xformers (2.3 -> 0.0.27)\n", "from torch import __version__; from packaging.version import Version as V\n", "xformers = \"xformers==0.0.27\" if V(__version__) < V(\"2.4.0\") else \"xformers\"\n", "!pip install --no-deps {xformers} trl peft accelerate bitsandbytes triton" ] }, { "cell_type": "markdown", "metadata": { "id": "r2v_X2fA0Df5" }, "source": [ "* We support Llama, Mistral, Phi-3, Gemma, Yi, DeepSeek, Qwen, TinyLlama, Vicuna, Open Hermes etc\n", "* We support 16bit LoRA or 4bit QLoRA. Both 2x faster.\n", "* `max_seq_length` can be set to anything, since we do automatic RoPE Scaling via [kaiokendev's](https://kaiokendev.github.io/til) method.\n", "* With [PR 26037](https://github.com/huggingface/transformers/pull/26037), we support downloading 4bit models **4x faster**! [Our repo](https://huggingface.co/unsloth) has Llama, Mistral 4bit models.\n", "* [**NEW**] We make Phi-3 Medium / Mini **2x faster**! 
See our [Phi-3 Medium notebook](https://colab.research.google.com/drive/1hhdhBa1j_hsymiW9m-WzxQtgqTH_NHqi?usp=sharing)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 331, "referenced_widgets": [ "678374910a65405a8d0fad306fccc934", "70cb67b1a6f64b81a1c15231f181c7f5", "2ba8f80a90c1448ca39e1976cd93c351", "f058256b9892475caf2d74f1605c4ce2", "54def8f425f54876a5bde6f66d63629d", "365a03957dc441108fc176c5a614f31c", "e3a04b4a2b074779af4815841cecca39", "b9e6e7a9f19042b281e56781a5d22830", "e5eb3cb7ca72444c90ec0e72226b04ff", "8419a72c6f3645368cb97f7f2ed6cea1", "bf4b87f750434c519c27ecc65e1ee09d", "7edfaa85ba904a8a9b52dac4b008d27a", "d7586f8cd53340fab8476e5ff48e3362", "0dffec13261e4bf7aeeaae63826a233f", "d9086669a7234a5f850d3f5be84ee676", "05a1c7419b5549fbaf77f3adcde12bf4", "c5c9ec51972845ef948fef17255ce888", "40ec0224839647ca845472b43d015372", "6b915e7093d5479d8a84d6c5504abecf", "810a8dd6fe6d4d74b692cbe1c7ceda12", "c0af25b8612243f4bade23465eacdc4b", "05d3c577cff248ac9ff936e0ba469957", "03c771a0fd7f4f78b6c4428fe53eb928", "7f41f7d6008d4d1d88f773b05a3657b4", "3e8df03a267b4a88919eff5325d1bb65", "8ea3e08688c84d6ab8fdf1dfc903738e", "c881789cdabd4bcd8251d3250e4abe3a", "778a01b5778c4487a3187181c1c575e7", "c130b8a942364e6fa99c4f641e9bfc67", "36739a0f98294195836d5475809c3743", "e96cd5ee23b649d18de99fe8e835b837", "401e41bbc13f42a08391c8c0a6d90076", "6f2ed6b670aa43559e50b6411cfa6a13", "0a906be814484ff1950ba52e76f69da3", "fb33ed2a86094149b4dbf55a99dfc5c1", "734ee1f019db4443a02f50fe227b9e3f", "387ed2dd78a94293adeb79a0ccad0e6a", "94cc02dd099847628d666cca89f9319a", "e6d78e15a0c04baa8afb8c7a3d18fad7", "a3a7d13efc0a4c739f8d751ab5e7dcd4", "269083dd443e4802a6f12f743866f4da", "6ebe214f4ec4442a9829684aeefceb00", "342e822e575b437ebd6f8d17efcc497f", "f477ebc74b394daa898963c28312936b", "07791a6e30574cbe92283ddea21bd944", "2e0adfd1a6f74835aef62b270ac2e9aa", "daf1537134e64d478969812869f3d29d", 
"c32502463f07437885d3a51f31409e44", "2ef9d56652004e63baea4673522f6c89", "1fc6430cb4fd4d1ebcc6f7b81feea3ec", "6ea1b90667764395a122a9fd76e2bc63", "d3c828f5d2984860a8a24adb5d4f1166", "fb19e29935114144861a8f1462157a05", "673986359dc9427a98a4fb7a688ac887", "2fde72d7018f49319c22b84d37a327f4", "3640fd0dabf24eaba9154f8442463d1a", "d96134b2c4124330b934e08f9af4c824", "0836e47500b54b27b5fc4f8b39835842", "c8219032f7ed4d63a50ebcd37e0b39f0", "461419716e4e4b2caf926516f24cdc86", "8b3f4f9f92e14a558b3d4f0cab126bea", "b413d96cf0a044a2b01f01d4947f3a31", "3e06fb4c42c0403da3b440121a475618", "f2f7348548a64198a94c6b59374008fa", "497d2efd3c8b4bb88e0015bdc1dfe46e", "139198796298496db36f3577d390d7e8" ] }, "id": "QmUBVEnvCDJv", "outputId": "27084327-f4c5-4d6c-86c1-1358186a511c" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "🦥 Unsloth: Will patch your computer to enable 2x faster free finetuning.\n", "==((====))== Unsloth 2024.8: Fast Gemma2 patching. Transformers = 4.44.2.\n", " \\\\ /| GPU: Tesla T4. Max memory: 14.748 GB. Platform = Linux.\n", "O^O/ \\_/ \\ Pytorch: 2.4.0+cu121. CUDA = 7.5. CUDA Toolkit = 12.1.\n", "\\ / Bfloat16 = FALSE. FA [Xformers = 0.0.27.post2. 
FA2 = False]\n", " \"-____-\" Free Apache license: http://github.com/unslothai/unsloth\n", "Unsloth: Fast downloading is enabled - ignore downloading bars which are red colored!\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "678374910a65405a8d0fad306fccc934", "version_major": 2, "version_minor": 0 }, "text/plain": [ "model.safetensors: 0%| | 0.00/6.13G [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "7edfaa85ba904a8a9b52dac4b008d27a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "generation_config.json: 0%| | 0.00/190 [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "03c771a0fd7f4f78b6c4428fe53eb928", "version_major": 2, "version_minor": 0 }, "text/plain": [ "tokenizer_config.json: 0%| | 0.00/46.4k [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0a906be814484ff1950ba52e76f69da3", "version_major": 2, "version_minor": 0 }, "text/plain": [ "tokenizer.model: 0%| | 0.00/4.24M [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "07791a6e30574cbe92283ddea21bd944", "version_major": 2, "version_minor": 0 }, "text/plain": [ "special_tokens_map.json: 0%| | 0.00/636 [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "3640fd0dabf24eaba9154f8442463d1a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "tokenizer.json: 0%| | 0.00/17.5M [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from unsloth import FastLanguageModel\n", "import torch\n", "max_seq_length = 2048 # Choose any! 
We auto support RoPE Scaling internally!\n", "dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+\n", "load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.\n", "\n", "\n", "model, tokenizer = FastLanguageModel.from_pretrained(\n", " model_name = \"unsloth/gemma-2-9b\",\n", " max_seq_length = max_seq_length,\n", " dtype = dtype,\n", " load_in_4bit = load_in_4bit,\n", " token = None, # Never hardcode tokens! For gated models, load your HF token from an env var or Colab secret\n", ")" ] }, { "cell_type": "markdown", "metadata": { "id": "SXd9bTZd1aaL" }, "source": [ "We now add LoRA adapters so we only need to update 1 to 10% of all parameters!" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "6bZsfBuZDeCL", "outputId": "545f0089-219c-422f-fefb-911aaf3f57ff" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Unsloth 2024.8 patched 42 layers with 42 QKV layers, 42 O layers and 42 MLP layers.\n" ] } ], "source": [ "model = FastLanguageModel.get_peft_model(\n", " model,\n", " r = 16, # Choose any number > 0 ! 
Suggested 8, 16, 32, 64, 128\n", " target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n", " \"gate_proj\", \"up_proj\", \"down_proj\",],\n", " lora_alpha = 16,\n", " lora_dropout = 0, # Supports any, but = 0 is optimized\n", " bias = \"none\", # Supports any, but = \"none\" is optimized\n", " # [NEW] \"unsloth\" uses 30% less VRAM, fits 2x larger batch sizes!\n", " use_gradient_checkpointing = \"unsloth\", # True or \"unsloth\" for very long context\n", " random_state = 3407,\n", " use_rslora = False, # We support rank stabilized LoRA\n", " loftq_config = None, # And LoftQ\n", ")" ] }, { "cell_type": "markdown", "metadata": { "id": "vITh0KVJ10qX" }, "source": [ "\n", "### Data Prep\n", "We now use the Bangla Alpaca-Orca dataset from [BanglaLLM](https://huggingface.co/datasets/BanglaLLM/bangla-alpaca-orca), formatted with the prompt template of the original [Alpaca dataset](https://crfm.stanford.edu/2023/03/13/alpaca.html). You can replace this code section with your own data prep.\n", "\n", "**[NOTE]** To train only on completions (ignoring the user's input) read TRL's docs [here](https://huggingface.co/docs/trl/sft_trainer#train-on-completions-only).\n", "\n", "**[NOTE]** Remember to add the **EOS_TOKEN** to the tokenized output!! Otherwise you'll get infinite generations!\n", "\n", "If you want to use the `llama-3` template for ShareGPT datasets, try our conversational [notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing).\n", "\n", "For text completions like novel writing, try this [notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing)." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 177, "referenced_widgets": [ "8e3e3f1f39df42ec821d1b928564da09", "0358a94fbe204627823216b048bf68a5", "0e94c4a67ef7485fb5f56798415e0d3d", "827a99f8939d4f98be38c3b4fa6610ef", "6f0ee04758e249ad8a809f6587bfddb8", "b632a7d91c394689abb6e5e7904c0181", "2663539cda94465ca2f1e778b93e7ced", "83aa33671151496c82ca8b51318d3b13", "2410fda0ccaa4724b07e0d831ddd1e0a", "5e5b630f628845f496fe25dcac7ab1af", "c379c5ed71454632a6aee8dc13439507", "8fd25d3a45fc4238a5b6a1349ded3d3a", "4cdb9a601cbe4cbb8532453713d92c5f", "e76baf1c15864c0ba7a915b2fb466011", "6b18c64fac6145aea1ede78aa0ae5ad2", "b1b4fb6e26cb4123ba0c2503e5e97138", "881176dcdf6341169b72473cadd78c2a", "0a936e7ccf1e42278e7bffacf77835a8", "f6d293c0a1b34d4eb0ae830ca1e095a8", "5ca5e52ef3c84415a8beb785767c0cc3", "6944241baee64180aaa73a0764f15d25", "6b5fef8e89da41ffac0a1a566be5d6c7", "c611a12429b142f9be9d205f5ecbba8c", "51e71bead0e24c53b89c2269fc885e2f", "c6068bf2a9384f59968f9b5a733989d1", "850851391a094dffa489dbb326297f82", "9fa63e5129f04a579035eba7599fa87b", "5be7433df7c2482ba79eee336d0ea303", "6808e2d30a634a00a92a58260ea3a837", "f86517d5f95c45969a1b443d35d32876", "fe9d4e6150794e55b20e07cd97aacbb4", "ae8b8478245242f58ac23640fd078052", "6e2f5febc19d4cd8bab9351a8c7a4d54", "b832c981fa8c4e98b377aa32f16742f5", "5a01274f4adf4c0994ae8966445ea1af", "baabf5a5422a4046873853bac7aedc81", "fb05d0eb082e45e6b634595a484fd750", "8ceb560415db41498c557e1115e5f50b", "8c98c1ec39e142a6bcfc14cc1e21e0cd", "196a70176d2149959f79916994ff63e7", "e7af7ea26840451c8b69767a3c90e0cd", "0d0fc89c1cdc4977a251d49e8ea8a578", "6eb712bcfe4844fea213bddef56bbf73", "0317cd59f03e46b4a6c0e903428a4b99", "ea9b0e0d120e41d28ae40d15fa41c2be", "c68ea312f5cf496796550d47e41769dc", "ebbcbf688e594ec8beb672c71c372e6b", "8e08b3b48754473499f6cc42c5107856", "1bafa0b2762d4494b3b2ece57ed20447", "1ef0645d0c8749369f82ba23da1c74fa", "6823b8cd31a449d29c8f5781b95b0597", 
"ba72d0797dba49a7b50f6c499d8db39d", "6e661a4b9fa3477cbf2acfe8a649aea9", "a04dc7854ed54837be388ee661759131", "4b0dded7e7b444339ca7d69939fb9bea" ] }, "id": "LjY75GoYUCB8", "outputId": "7da24b8a-80b7-44f7-eafa-7964295db683" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "8e3e3f1f39df42ec821d1b928564da09", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading readme: 0%| | 0.00/450 [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "8fd25d3a45fc4238a5b6a1349ded3d3a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading data: 0%| | 0.00/158M [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c611a12429b142f9be9d205f5ecbba8c", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading data: 0%| | 0.00/144M [00:00, ?B/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "b832c981fa8c4e98b377aa32f16742f5", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Generating train split: 0%| | 0/172026 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "ea9b0e0d120e41d28ae40d15fa41c2be", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/172026 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. 
Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{}\n", "\n", "### Input:\n", "{}\n", "\n", "### Response:\n", "{}\"\"\"\n", "\n", "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", "def formatting_prompts_func(examples):\n", " instructions = examples[\"instruction\"]\n", " inputs = examples[\"input\"]\n", " outputs = examples[\"output\"]\n", " texts = []\n", " for instruction, input, output in zip(instructions, inputs, outputs):\n", " # Must add EOS_TOKEN, otherwise your generation will go on forever!\n", " text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN\n", " texts.append(text)\n", " return { \"text\" : texts, }\n", "pass\n", "\n", "from datasets import load_dataset\n", "dataset = load_dataset(\"BanglaLLM/bangla-alpaca-orca\", split = \"train\")\n", "dataset = dataset.map(formatting_prompts_func, batched = True,)" ] }, { "cell_type": "markdown", "metadata": { "id": "idAEIeSQ3xdS" }, "source": [ "\n", "### Train the model\n", "Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). We do 160 steps to speed things up, but you can set `num_train_epochs=1` for a full run and set `max_steps=None` to turn the step limit off. We also support TRL's `DPOTrainer`!" 
] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": [ "a112ce9cda9844978d756cf2c0992c4f", "3a263ba2e5c746008906215c6b9a9e77", "6d50cb4bab66462a8a63a5fe39607e3a", "30cc22cc8b1e4d1d8e6b5d3255257cca", "b001536f1c3046aab8d82bfaaeda77e9", "fa5b1da560114424be8322096e4962f2", "c2367cfce08a40e2ae924502a259affa", "bf3d34b1175b4eb08151a88051326994", "0354c2a3792e41188f4ee5df779273a3", "e449f5ddb6264049b0c19c23c689847d", "9e3b03487e34439aba797e2c5bb2c768" ] }, "id": "95_Nn-89DhsL", "outputId": "c4932606-6f39-431a-df86-6b9696552d4c" }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a112ce9cda9844978d756cf2c0992c4f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map (num_proc=2): 0%| | 0/172026 [00:00, ? examples/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "max_steps is given, it will override any value given in num_train_epochs\n" ] } ], "source": [ "from trl import SFTTrainer\n", "from transformers import TrainingArguments\n", "from unsloth import is_bfloat16_supported\n", "\n", "trainer = SFTTrainer(\n", " model = model,\n", " tokenizer = tokenizer,\n", " train_dataset = dataset,\n", " dataset_text_field = \"text\",\n", " max_seq_length = max_seq_length,\n", " dataset_num_proc = 2,\n", " packing = False, # Can make training 5x faster for short sequences.\n", " args = TrainingArguments(\n", " per_device_train_batch_size = 2,\n", " gradient_accumulation_steps = 4,\n", " warmup_steps = 5,\n", " max_steps = 160,\n", " learning_rate = 2e-4,\n", " fp16 = not is_bfloat16_supported(),\n", " bf16 = is_bfloat16_supported(),\n", " logging_steps = 1,\n", " optim = \"adamw_8bit\",\n", " weight_decay = 0.01,\n", " lr_scheduler_type = \"linear\",\n", " seed = 3407,\n", " output_dir = \"outputs\",\n", " ),\n", ")" ] }, { "cell_type": "code", "execution_count": null, 
"metadata": { "cellView": "form", "colab": { "base_uri": "https://localhost:8080/" }, "id": "2ejIt2xSNKKp", "outputId": "8e56a767-e4d9-4405-d04a-9cb445426b96" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "GPU = Tesla T4. Max memory = 14.748 GB.\n", "6.576 GB of memory reserved.\n" ] } ], "source": [ "#@title Show current memory stats\n", "gpu_stats = torch.cuda.get_device_properties(0)\n", "start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)\n", "max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)\n", "print(f\"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.\")\n", "print(f\"{start_gpu_memory} GB of memory reserved.\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000 }, "id": "yqxqAZ7KJ4oL", "outputId": "2e194c70-e5b2-4d86-da04-af205473bfc0" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "==((====))== Unsloth - 2x faster free finetuning | Num GPUs = 1\n", " \\\\ /| Num examples = 172,026 | Num Epochs = 1\n", "O^O/ \\_/ \\ Batch size per device = 2 | Gradient Accumulation steps = 4\n", "\\ / Total batch size = 8 | Total steps = 160\n", " \"-____-\" Number of trainable parameters = 54,018,048\n" ] }, { "data": { "text/html": [ "\n", "
Step | \n", "Training Loss | \n", "
---|---|
1 | \n", "1.305600 | \n", "
2 | \n", "1.398000 | \n", "
3 | \n", "1.628300 | \n", "
4 | \n", "1.467200 | \n", "
5 | \n", "1.355900 | \n", "
6 | \n", "1.228100 | \n", "
7 | \n", "1.216800 | \n", "
8 | \n", "1.187800 | \n", "
9 | \n", "1.186500 | \n", "
10 | \n", "0.937900 | \n", "
11 | \n", "1.217700 | \n", "
12 | \n", "0.845000 | \n", "
13 | \n", "1.157800 | \n", "
14 | \n", "1.055500 | \n", "
15 | \n", "1.130600 | \n", "
16 | \n", "0.845500 | \n", "
17 | \n", "1.042000 | \n", "
18 | \n", "1.155200 | \n", "
19 | \n", "0.957200 | \n", "
20 | \n", "0.869600 | \n", "
21 | \n", "0.906400 | \n", "
22 | \n", "0.936200 | \n", "
23 | \n", "1.197900 | \n", "
24 | \n", "0.865900 | \n", "
25 | \n", "1.002400 | \n", "
26 | \n", "0.984800 | \n", "
27 | \n", "0.722000 | \n", "
28 | \n", "0.958100 | \n", "
29 | \n", "1.146100 | \n", "
30 | \n", "0.927100 | \n", "
31 | \n", "0.751400 | \n", "
32 | \n", "0.900500 | \n", "
33 | \n", "1.009300 | \n", "
34 | \n", "1.208600 | \n", "
35 | \n", "1.342200 | \n", "
36 | \n", "0.991500 | \n", "
37 | \n", "0.985200 | \n", "
38 | \n", "0.932700 | \n", "
39 | \n", "0.927500 | \n", "
40 | \n", "0.563600 | \n", "
41 | \n", "1.149100 | \n", "
42 | \n", "1.057900 | \n", "
43 | \n", "0.627900 | \n", "
44 | \n", "0.741800 | \n", "
45 | \n", "0.959800 | \n", "
46 | \n", "0.629700 | \n", "
47 | \n", "0.702000 | \n", "
48 | \n", "0.900500 | \n", "
49 | \n", "0.974500 | \n", "
50 | \n", "1.075500 | \n", "
51 | \n", "1.070200 | \n", "
52 | \n", "0.974300 | \n", "
53 | \n", "0.981700 | \n", "
54 | \n", "0.940200 | \n", "
55 | \n", "1.078200 | \n", "
56 | \n", "1.051200 | \n", "
57 | \n", "0.968800 | \n", "
58 | \n", "0.613100 | \n", "
59 | \n", "0.976200 | \n", "
60 | \n", "1.094600 | \n", "
61 | \n", "0.859800 | \n", "
62 | \n", "0.871000 | \n", "
63 | \n", "0.638400 | \n", "
64 | \n", "0.889400 | \n", "
65 | \n", "1.186200 | \n", "
66 | \n", "0.889500 | \n", "
67 | \n", "1.040800 | \n", "
68 | \n", "0.751800 | \n", "
69 | \n", "1.011200 | \n", "
70 | \n", "0.772000 | \n", "
71 | \n", "0.820700 | \n", "
72 | \n", "0.567300 | \n", "
73 | \n", "1.165600 | \n", "
74 | \n", "0.999100 | \n", "
75 | \n", "1.157300 | \n", "
76 | \n", "0.675600 | \n", "
77 | \n", "0.923300 | \n", "
78 | \n", "0.834000 | \n", "
79 | \n", "1.128300 | \n", "
80 | \n", "1.036600 | \n", "
81 | \n", "0.800700 | \n", "
82 | \n", "1.117500 | \n", "
83 | \n", "0.665600 | \n", "
84 | \n", "0.979200 | \n", "
85 | \n", "0.681100 | \n", "
86 | \n", "1.115600 | \n", "
87 | \n", "0.807500 | \n", "
88 | \n", "0.928100 | \n", "
89 | \n", "0.883100 | \n", "
90 | \n", "0.797900 | \n", "
91 | \n", "0.940200 | \n", "
92 | \n", "1.062500 | \n", "
93 | \n", "0.770600 | \n", "
94 | \n", "1.002400 | \n", "
95 | \n", "0.971400 | \n", "
96 | \n", "1.028100 | \n", "
97 | \n", "0.908500 | \n", "
98 | \n", "1.131300 | \n", "
99 | \n", "0.845000 | \n", "
100 | \n", "1.052600 | \n", "
101 | \n", "0.897500 | \n", "
102 | \n", "0.761500 | \n", "
103 | \n", "1.103400 | \n", "
104 | \n", "0.842500 | \n", "
105 | \n", "0.752600 | \n", "
106 | \n", "1.050200 | \n", "
107 | \n", "0.912000 | \n", "
108 | \n", "0.877600 | \n", "
109 | \n", "0.934700 | \n", "
110 | \n", "0.987600 | \n", "
111 | \n", "0.636600 | \n", "
112 | \n", "0.940900 | \n", "
113 | \n", "0.896300 | \n", "
114 | \n", "1.003700 | \n", "
115 | \n", "0.933800 | \n", "
116 | \n", "0.915200 | \n", "
117 | \n", "0.899400 | \n", "
118 | \n", "0.818300 | \n", "
119 | \n", "0.811600 | \n", "
120 | \n", "1.039000 | \n", "
121 | \n", "0.710400 | \n", "
122 | \n", "0.925500 | \n", "
123 | \n", "0.929600 | \n", "
124 | \n", "0.904700 | \n", "
125 | \n", "0.899000 | \n", "
126 | \n", "0.786100 | \n", "
127 | \n", "1.049600 | \n", "
128 | \n", "1.071100 | \n", "
129 | \n", "0.814700 | \n", "
130 | \n", "0.652000 | \n", "
131 | \n", "0.680300 | \n", "
132 | \n", "0.839100 | \n", "
133 | \n", "0.736900 | \n", "
134 | \n", "0.893200 | \n", "
135 | \n", "0.873000 | \n", "
136 | \n", "0.972000 | \n", "
137 | \n", "1.037900 | \n", "
138 | \n", "0.802900 | \n", "
139 | \n", "0.947900 | \n", "
140 | \n", "0.792200 | \n", "
141 | \n", "0.882200 | \n", "
142 | \n", "0.688400 | \n", "
143 | \n", "0.923200 | \n", "
144 | \n", "0.866100 | \n", "
145 | \n", "1.030100 | \n", "
146 | \n", "0.964200 | \n", "
147 | \n", "0.887100 | \n", "
148 | \n", "0.940800 | \n", "
149 | \n", "1.025300 | \n", "
150 | \n", "0.921400 | \n", "
151 | \n", "0.743500 | \n", "
152 | \n", "0.866900 | \n", "
153 | \n", "1.143300 | \n", "
154 | \n", "0.942400 | \n", "
155 | \n", "0.880800 | \n", "
156 | \n", "1.039400 | \n", "
157 | \n", "0.901400 | \n", "
158 | \n", "0.934700 | \n", "
159 | \n", "0.967200 | \n", "
160 | \n", "0.889500 | \n", "
"
],
"text/plain": [
"