Dataset card metadata: Modalities: Tabular, Text. Format: json. Size: < 1K. Libraries: Datasets, Dask.

lca-ci-builds-repair / data / 2c06ffa.json
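
The file holds a single benchmark record as one JSON object (shown below). A minimal sketch for loading and inspecting it with Python's standard json module; the local path is an assumption and should point at wherever the file was downloaded:

```python
import json

# Assumed local path to this datapoint; adjust as needed.
with open("data/2c06ffa.json", encoding="utf-8") as f:
    record = json.load(f)

# Repository and commit pair: the CI run fails at sha_fail and passes at sha_success.
print(record["repo_owner"], record["repo_name"])        # huggingface diffusers
print(record["sha_fail"], "->", record["sha_success"])

# The workflow definition and the captured failure log are stored as plain strings.
print(record["workflow_name"])                          # Run code quality checks
print(record["logs"][0]["step_name"])                   # check_code_quality/5_Check quality.txt
print(record["logs"][0]["log"].splitlines()[-1])        # ##[error]Process completed with exit code 1.
```
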
{"language": "Python", "id": 3, "repo_owner": "huggingface", "repo_name": "diffusers", "head_branch": "ipadapterfaceid", "workflow_name": "Run code quality checks", "workflow_filename": "pr_quality.yml", "workflow_path": ".github/workflows/pr_quality.yml", "contributor": "fabiorigano", "sha_fail": "2c06ffa4c9d2c37846c60ad75899b4d72f214ff9", "sha_success": "217d9d073981605acab5200fc841f20c798c1449", "workflow": "name: Run code quality checks\n\non:\n pull_request:\n branches:\n - main\n push:\n branches:\n - main\n\nconcurrency:\n group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}\n cancel-in-progress: true\n\njobs:\n check_code_quality:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v3\n - name: Set up Python\n uses: actions/setup-python@v4\n with:\n python-version: \"3.8\"\n - name: Install dependencies\n run: |\n python -m pip install --upgrade pip\n pip install .[quality]\n - name: Check quality\n run: |\n ruff check examples tests src utils scripts\n ruff format examples tests src utils scripts --check\n\n check_repository_consistency:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v3\n - name: Set up Python\n uses: actions/setup-python@v4\n with:\n python-version: \"3.8\"\n - name: Install dependencies\n run: |\n python -m pip install --upgrade pip\n pip install .[quality]\n - name: Check quality\n run: |\n python utils/check_copies.py\n python utils/check_dummies.py\n make deps_table_check_updated\n", "logs": [{"step_name": "check_code_quality/5_Check quality.txt", "log": "##[group]Run ruff check examples tests src utils scripts\n\u001b[36;1mruff check examples tests src utils scripts\u001b[0m\n\u001b[36;1mruff format examples tests src utils scripts --check\u001b[0m\nshell: /usr/bin/bash -e {0}\nenv:\n pythonLocation: /opt/actions-runner/_work/_tool/Python/3.8.18/x64\n PKG_CONFIG_PATH: /opt/actions-runner/_work/_tool/Python/3.8.18/x64/lib/pkgconfig\n Python_ROOT_DIR: /opt/actions-runner/_work/_tool/Python/3.8.18/x64\n Python2_ROOT_DIR: /opt/actions-runner/_work/_tool/Python/3.8.18/x64\n Python3_ROOT_DIR: /opt/actions-runner/_work/_tool/Python/3.8.18/x64\n LD_LIBRARY_PATH: /opt/actions-runner/_work/_tool/Python/3.8.18/x64/lib\n##[endgroup]\nexamples/community/ip_adapter_face_id.py:15:1: I001 [*] Import block is un-sorted or un-formatted\nFound 1 error.\n[*] 1 fixable with the `--fix` option.\n##[error]Process completed with exit code 1.\n"}], "diff": "diff --git a/examples/community/README.md b/examples/community/README.md\nindex f205f3b70..2fdbdb414 100755\n--- a/examples/community/README.md\n+++ b/examples/community/README.md\n@@ -3307,7 +3307,7 @@ pipeline = DiffusionPipeline.from_pretrained(\n torch_dtype=torch.float16,\n scheduler=noise_scheduler,\n vae=vae,\n- custom_pipeline=\"./forked/diffusers/examples/community/ip_adapter_face_id.py\"\n+ custom_pipeline=\"ip_adapter_face_id\"\n )\n pipeline.load_ip_adapter_face_id(\"h94/IP-Adapter-FaceID\", \"ip-adapter-faceid_sd15.bin\")\n pipeline.to(\"cuda\")\ndiff --git a/examples/community/ip_adapter_face_id.py b/examples/community/ip_adapter_face_id.py\nindex e3c5a2c84..d9325742c 100644\n--- a/examples/community/ip_adapter_face_id.py\n+++ b/examples/community/ip_adapter_face_id.py\n@@ -14,12 +14,12 @@\n \n import inspect\n from typing import Any, Callable, Dict, List, Optional, Union\n-from safetensors import safe_open\n \n import torch\n import torch.nn as nn\n import torch.nn.functional as F\n from packaging import version\n+from safetensors import safe_open\n from transformers import 
CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection\n \n from diffusers.configuration_utils import FrozenDict\n@@ -27,20 +27,20 @@ from diffusers.image_processor import VaeImageProcessor\n from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin\n from diffusers.models import AutoencoderKL, UNet2DConditionModel\n from diffusers.models.attention_processor import FusedAttnProcessor2_0\n-from diffusers.models.lora import adjust_lora_scale_text_encoder, LoRALinearLayer\n+from diffusers.models.lora import LoRALinearLayer, adjust_lora_scale_text_encoder\n+from diffusers.pipelines.pipeline_utils import DiffusionPipeline\n+from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput\n+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker\n from diffusers.schedulers import KarrasDiffusionSchedulers\n from diffusers.utils import (\n- _get_model_file,\n USE_PEFT_BACKEND,\n+ _get_model_file,\n deprecate,\n logging,\n scale_lora_layers,\n unscale_lora_layers,\n )\n from diffusers.utils.torch_utils import randn_tensor\n-from diffusers.pipelines.pipeline_utils import DiffusionPipeline\n-from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput\n-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker\n \n \n logger = logging.get_logger(__name__) # pylint: disable=invalid-name\n@@ -555,7 +555,7 @@ class IPAdapterFaceIDStableDiffusionPipeline(\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n- )\n+ )\n if weight_name.endswith(\".safetensors\"):\n state_dict = {\"image_proj\": {}, \"ip_adapter\": {}}\n with safe_open(model_file, framework=\"pt\", device=\"cpu\") as f:\n@@ -1438,7 +1438,7 @@ class IPAdapterFaceIDStableDiffusionPipeline(\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n \n # 6.1 Add image embeds for IP-Adapter\n- added_cond_kwargs ={\"image_embeds\": image_embeds} if image_embeds is not None else None\n+ added_cond_kwargs = {\"image_embeds\": image_embeds} if image_embeds is not None else None\n \n # 6.2 Optionally get Guidance Scale Embedding\n timestep_cond = None\n", "difficulty": "1"}
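
The record pairs the failing commit (sha_fail) with the commit at which the same workflow passed (sha_success); the diff field stores the patch between the two, and the captured log shows the concrete failure (a ruff I001 "Import block is un-sorted or un-formatted" error). The sketch below is one hedged way to reproduce the failure and verify the ground-truth fix locally; the clone location, the use of git and ruff through subprocess, and the patch filename are assumptions, not part of the dataset (in CI, ruff is installed via pip install .[quality]).

```python
import subprocess
from pathlib import Path

REPO_DIR = Path("diffusers")  # assumption: a local clone of huggingface/diffusers

def run(*args, check=True):
    """Echo a command and run it inside the clone."""
    print("+", " ".join(args))
    return subprocess.run(args, cwd=REPO_DIR, check=check)

# 1. Check out the failing revision recorded in the datapoint.
run("git", "checkout", "2c06ffa4c9d2c37846c60ad75899b4d72f214ff9")

# 2. Re-run the failing step from pr_quality.yml; ruff reports the same
#    I001 import-sorting error that appears in the captured log.
run("ruff", "check", "examples", "tests", "src", "utils", "scripts", check=False)

# 3. Apply the ground-truth patch stored in the record, then re-check.
#    (`record` is the dict loaded from 2c06ffa.json in the earlier sketch.)
patch = Path("fix.patch").resolve()
patch.write_text(record["diff"], encoding="utf-8")
run("git", "apply", str(patch))
run("ruff", "check", "examples", "tests", "src", "utils", "scripts")
```

Since the log notes that the I001 error is auto-fixable, running ruff check --fix on the failing revision is another way to arrive at an equivalent import ordering; the README hunk in the diff (the custom_pipeline path change) appears unrelated to the lint failure itself.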