Modalities: Tabular, Text
Formats: json
Size: < 1K
Libraries: Datasets, Dask
File size: 4,660 Bytes
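Given the json format and the Datasets library tag above, the rows can be read with a standard JSON loader. The sketch below is a minimal example only; "data.jsonl" is a placeholder path, since the dataset's actual repository id and file name are not shown on this page.

```python
# Minimal loading sketch. "data.jsonl" is a placeholder, assuming the rows are
# stored as JSON Lines; substitute the dataset's real repo id or file name.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")

row = ds[0]
print(row["repo_name"], row["workflow_name"])      # repository and CI workflow
print(row["sha_fail"], "->", row["sha_success"])   # failing and passing commits
```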
{"language": "Python", "id": 22, "repo_owner": "huggingface", "repo_name": "diffusers", "head_branch": "refactor-single-file", "workflow_name": "Run code quality checks", "workflow_filename": "pr_quality.yml", "workflow_path": ".github/workflows/pr_quality.yml", "contributor": "huggingface", "sha_fail": "68ddb2559e616656301858d441a523ebd64a710f", "sha_success": "7395283f23a66b634d0d316fdceadf17ed5f4f97", "workflow": "name: Run code quality checks\n\non:\n  pull_request:\n    branches:\n      - main\n  push:\n    branches:\n      - main\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n  cancel-in-progress: true\n\njobs:\n  check_code_quality:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v3\n      - name: Set up Python\n        uses: actions/setup-python@v4\n        with:\n          python-version: \"3.8\"\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install .[quality]\n      - name: Check quality\n        run: |\n          ruff check examples tests src utils scripts\n          ruff format examples tests src utils scripts --check\n\n  check_repository_consistency:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v3\n      - name: Set up Python\n        uses: actions/setup-python@v4\n        with:\n          python-version: \"3.8\"\n      - name: Install dependencies\n        run: |\n          python -m pip install --upgrade pip\n          pip install .[quality]\n      - name: Check quality\n        run: |\n          python utils/check_copies.py\n          python utils/check_dummies.py\n          make deps_table_check_updated\n", "logs": [{"step_name": "check_code_quality/5_Check quality.txt", "log": "##[group]Run ruff check examples tests src utils scripts\n\u001b[36;1mruff check examples tests src utils scripts\u001b[0m\n\u001b[36;1mruff format examples tests src utils scripts --check\u001b[0m\nshell: /usr/bin/bash -e {0}\nenv:\n  pythonLocation: /opt/hostedtoolcache/Python/3.8.18/x64\n  PKG_CONFIG_PATH: /opt/hostedtoolcache/Python/3.8.18/x64/lib/pkgconfig\n  Python_ROOT_DIR: /opt/hostedtoolcache/Python/3.8.18/x64\n  Python2_ROOT_DIR: /opt/hostedtoolcache/Python/3.8.18/x64\n  Python3_ROOT_DIR: /opt/hostedtoolcache/Python/3.8.18/x64\n  LD_LIBRARY_PATH: /opt/hostedtoolcache/Python/3.8.18/x64/lib\n##[endgroup]\nsrc/diffusers/loaders/single_file_utils.py:23:8: F401 [*] `torch` imported but unused\nsrc/diffusers/loaders/single_file_utils.py:25:44: F401 [*] `safetensors.torch.load_file` imported but unused\nFound 2 errors.\n[*] 2 fixable with the `--fix` option.\n##[error]Process completed with exit code 1.\n"}], "diff": "diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py\nindex 55b438b036..4dc4c71013 100644\n--- a/src/diffusers/loaders/single_file_utils.py\n+++ b/src/diffusers/loaders/single_file_utils.py\n@@ -20,9 +20,7 @@ from contextlib import nullcontext\n from io import BytesIO\n \n import requests\n-import torch\n import yaml\n-from safetensors.torch import load_file as safe_load\n from transformers import (\n     CLIPTextConfig,\n     CLIPTextModel,\n@@ -1117,7 +1115,9 @@ def create_text_encoders_and_tokenizers_from_ldm(\n     elif model_type == \"FrozenCLIPEmbedder\":\n         try:\n             config_name = \"openai/clip-vit-large-patch14\"\n-            text_encoder = create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_files_only=local_files_only)\n+     
       text_encoder = create_text_encoder_from_ldm_clip_checkpoint(\n+                config_name, checkpoint, local_files_only=local_files_only\n+            )\n             tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only)\n \n         except Exception:\n@@ -1159,7 +1159,9 @@ def create_text_encoders_and_tokenizers_from_ldm(\n         try:\n             config_name = \"openai/clip-vit-large-patch14\"\n             tokenizer = CLIPTokenizer.from_pretrained(config_name, local_files_only=local_files_only)\n-            text_encoder = create_text_encoder_from_ldm_clip_checkpoint(config_name, checkpoint, local_files_only=local_files_only)\n+            text_encoder = create_text_encoder_from_ldm_clip_checkpoint(\n+                config_name, checkpoint, local_files_only=local_files_only\n+            )\n \n         except Exception:\n             raise ValueError(\n", "difficulty": 0, "changed_files": ["src/diffusers/loaders/single_file_utils.py"], "commit_link": "https://github.com/huggingface/diffusers/tree/68ddb2559e616656301858d441a523ebd64a710f"}
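Each row pairs a failing workflow run (the captured step log at sha_fail) with the diff that makes the same checks pass at sha_success. The sketch below shows one way to pull the ruff findings out of the logs field so a failure can be compared against its fix; it assumes `row` is a dict shaped like the record above, and the helper `lint_errors` is illustrative, not part of any library.

```python
# Sketch: extract ruff findings (path, line, column, rule code) from the captured
# step logs of one row, to compare the failure against row["diff"].
# Assumes `row` is a dict shaped like the record above; `lint_errors` is a
# hypothetical helper, not part of the dataset or any library.
import re

RUFF_ERROR = re.compile(
    r"^(?P<path>\S+\.py):(?P<line>\d+):(?P<col>\d+): (?P<code>[A-Z]+\d+)",
    re.MULTILINE,
)

def lint_errors(row):
    findings = []
    for step in row["logs"]:
        findings.extend(m.groupdict() for m in RUFF_ERROR.finditer(step["log"]))
    return findings

# For the record above this yields the two F401 "imported but unused" findings,
# which the diff resolves by deleting the `torch` and
# `safetensors.torch.load_file` imports.
```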