z1
Browse filesz11
This view is limited to 50 files because it contains too many changes.
See raw diff
- .env +22 -0
- .github/FUNDING.yml +1 -0
- .github/ISSUE_TEMPLATE/bug_report_template.yml +53 -0
- .github/ISSUE_TEMPLATE/feature_request.md +16 -0
- .github/dependabot.yml +11 -0
- .github/pull_request_template.md +3 -0
- .github/workflows/auto-release.yml +28 -0
- .github/workflows/stale.yml +22 -0
- .gitignore +50 -0
- CMD_FLAGS.txt +3 -0
- LICENSE +661 -0
- characters/Assistant.yaml +4 -0
- characters/Example.png +0 -0
- characters/Example.yaml +17 -0
- characters/Nex.png +0 -0
- characters/Nex.yaml +3 -0
- cmd_linux.sh +22 -0
- cmd_macos.sh +24 -0
- cmd_windows.bat +34 -0
- cmd_wsl.bat +11 -0
- convert-to-safetensors.py +38 -0
- extensions/Training_PRO/README.md +92 -0
- extensions/Training_PRO/custom_scheduler.py +433 -0
- extensions/Training_PRO/matplotgraph.py +62 -0
- extensions/Training_PRO/script.py +1308 -0
- extensions/Training_PRO/train_utils.py +368 -0
- extensions/character_bias/script.py +74 -0
- extensions/coqui_tts/harvard_sentences.txt +720 -0
- extensions/coqui_tts/languages.json +18 -0
- extensions/coqui_tts/requirements.txt +1 -0
- extensions/coqui_tts/script.py +228 -0
- extensions/coqui_tts/style.css +8 -0
- extensions/coqui_tts/voices/arnold.wav +0 -0
- extensions/coqui_tts/voices/female_01.wav +0 -0
- extensions/coqui_tts/voices/female_02.wav +0 -0
- extensions/example/script.py +139 -0
- extensions/gallery/__pycache__/script.cpython-311.pyc +0 -0
- extensions/gallery/script.js +40 -0
- extensions/gallery/script.py +129 -0
- extensions/google_translate/requirements.txt +1 -0
- extensions/google_translate/script.py +59 -0
- extensions/long_replies/script.py +143 -0
- extensions/multimodal/DOCS.md +85 -0
- extensions/multimodal/README.md +139 -0
- extensions/multimodal/abstract_pipeline.py +63 -0
- extensions/multimodal/multimodal_embedder.py +178 -0
- extensions/multimodal/pipeline_loader.py +52 -0
- extensions/multimodal/pipelines/llava/README.md +9 -0
- extensions/multimodal/pipelines/llava/llava.py +262 -0
- extensions/multimodal/pipelines/llava/pipelines.py +48 -0
.env
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# by default the Dockerfile specifies these versions: 3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX
|
2 |
+
# however for me to work i had to specify the exact version for my card ( 2060 ) it was 7.5
|
3 |
+
# https://developer.nvidia.com/cuda-gpus you can find the version for your card here
|
4 |
+
TORCH_CUDA_ARCH_LIST=7.5
|
5 |
+
# your command-line flags go here:
|
6 |
+
CLI_ARGS=--listen
|
7 |
+
# the port the webui binds to on the host
|
8 |
+
HOST_PORT=7860
|
9 |
+
# the port the webui binds to inside the container
|
10 |
+
CONTAINER_PORT=7860
|
11 |
+
# the port the api binds to on the host
|
12 |
+
HOST_API_PORT=5000
|
13 |
+
# the port the api binds to inside the container
|
14 |
+
CONTAINER_API_PORT=5000
|
15 |
+
# Comma separated extensions to build
|
16 |
+
BUILD_EXTENSIONS=""
|
17 |
+
# Set APP_RUNTIME_GID to an appropriate host system group to enable access to mounted volumes
|
18 |
+
# You can find your current host user group id with the command `id -g`
|
19 |
+
APP_RUNTIME_GID=6972
|
20 |
+
# override default app build permissions (handy for deploying to cloud)
|
21 |
+
#APP_GID=6972
|
22 |
+
#APP_UID=6972
|
.github/FUNDING.yml
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
ko_fi: oobabooga
|
.github/ISSUE_TEMPLATE/bug_report_template.yml
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: "Bug report"
|
2 |
+
description: Report a bug
|
3 |
+
labels: [ "bug" ]
|
4 |
+
body:
|
5 |
+
- type: markdown
|
6 |
+
attributes:
|
7 |
+
value: |
|
8 |
+
Thanks for taking the time to fill out this bug report!
|
9 |
+
- type: textarea
|
10 |
+
id: bug-description
|
11 |
+
attributes:
|
12 |
+
label: Describe the bug
|
13 |
+
description: A clear and concise description of what the bug is.
|
14 |
+
placeholder: Bug description
|
15 |
+
validations:
|
16 |
+
required: true
|
17 |
+
- type: checkboxes
|
18 |
+
attributes:
|
19 |
+
label: Is there an existing issue for this?
|
20 |
+
description: Please search to see if an issue already exists for the issue you encountered.
|
21 |
+
options:
|
22 |
+
- label: I have searched the existing issues
|
23 |
+
required: true
|
24 |
+
- type: textarea
|
25 |
+
id: reproduction
|
26 |
+
attributes:
|
27 |
+
label: Reproduction
|
28 |
+
description: Please provide the steps necessary to reproduce your issue.
|
29 |
+
placeholder: Reproduction
|
30 |
+
validations:
|
31 |
+
required: true
|
32 |
+
- type: textarea
|
33 |
+
id: screenshot
|
34 |
+
attributes:
|
35 |
+
label: Screenshot
|
36 |
+
description: "If possible, please include screenshot(s) so that we can understand what the issue is."
|
37 |
+
- type: textarea
|
38 |
+
id: logs
|
39 |
+
attributes:
|
40 |
+
label: Logs
|
41 |
+
description: "Please include the full stacktrace of the errors you get in the command-line (if any)."
|
42 |
+
render: shell
|
43 |
+
validations:
|
44 |
+
required: true
|
45 |
+
- type: textarea
|
46 |
+
id: system-info
|
47 |
+
attributes:
|
48 |
+
label: System Info
|
49 |
+
description: "Please share your system info with us: operating system, GPU brand, and GPU model. If you are using a Google Colab notebook, mention that instead."
|
50 |
+
render: shell
|
51 |
+
placeholder:
|
52 |
+
validations:
|
53 |
+
required: true
|
.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
name: Feature request
|
3 |
+
about: Suggest an improvement or new feature for the web UI
|
4 |
+
title: ''
|
5 |
+
labels: 'enhancement'
|
6 |
+
assignees: ''
|
7 |
+
|
8 |
+
---
|
9 |
+
|
10 |
+
**Description**
|
11 |
+
|
12 |
+
A clear and concise description of what you want to be implemented.
|
13 |
+
|
14 |
+
**Additional Context**
|
15 |
+
|
16 |
+
If applicable, please provide any extra information, external links, or screenshots that could be useful.
|
.github/dependabot.yml
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# To get started with Dependabot version updates, you'll need to specify which
|
2 |
+
# package ecosystems to update and where the package manifests are located.
|
3 |
+
# Please see the documentation for all configuration options:
|
4 |
+
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
5 |
+
|
6 |
+
version: 2
|
7 |
+
updates:
|
8 |
+
- package-ecosystem: "pip" # See documentation for possible values
|
9 |
+
directory: "/" # Location of package manifests
|
10 |
+
schedule:
|
11 |
+
interval: "weekly"
|
.github/pull_request_template.md
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
## Checklist:
|
2 |
+
|
3 |
+
- [ ] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).
|
.github/workflows/auto-release.yml
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Weekly Snapshot Release
|
2 |
+
on:
|
3 |
+
schedule:
|
4 |
+
- cron: '15 20 * * 0'
|
5 |
+
workflow_dispatch: {}
|
6 |
+
|
7 |
+
jobs:
|
8 |
+
create_release:
|
9 |
+
runs-on: ubuntu-latest
|
10 |
+
steps:
|
11 |
+
- name: Checkout code
|
12 |
+
uses: actions/checkout@v2
|
13 |
+
|
14 |
+
- name: Set snapshot tag
|
15 |
+
id: set_snapshot_tag
|
16 |
+
run: echo ::set-output name=tag::snapshot-$(date +'%Y-%m-%d')
|
17 |
+
|
18 |
+
- name: Create release
|
19 |
+
id: create_release
|
20 |
+
uses: softprops/action-gh-release@v1
|
21 |
+
with:
|
22 |
+
generate_release_notes: true
|
23 |
+
tag_name: ${{ steps.set_snapshot_tag.outputs.tag }}
|
24 |
+
name: ${{ steps.set_snapshot_tag.outputs.tag }}
|
25 |
+
draft: false
|
26 |
+
prerelease: false
|
27 |
+
env:
|
28 |
+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
.github/workflows/stale.yml
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Close inactive issues
|
2 |
+
on:
|
3 |
+
schedule:
|
4 |
+
- cron: "10 23 * * *"
|
5 |
+
|
6 |
+
jobs:
|
7 |
+
close-issues:
|
8 |
+
runs-on: ubuntu-latest
|
9 |
+
permissions:
|
10 |
+
issues: write
|
11 |
+
pull-requests: write
|
12 |
+
steps:
|
13 |
+
- uses: actions/stale@v5
|
14 |
+
with:
|
15 |
+
stale-issue-message: ""
|
16 |
+
close-issue-message: "This issue has been closed due to inactivity for 6 weeks. If you believe it is still relevant, please leave a comment below. You can tag a developer in your comment."
|
17 |
+
days-before-issue-stale: 42
|
18 |
+
days-before-issue-close: 0
|
19 |
+
stale-issue-label: "stale"
|
20 |
+
days-before-pr-stale: -1
|
21 |
+
days-before-pr-close: -1
|
22 |
+
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
.gitignore
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/cache
|
2 |
+
/characters
|
3 |
+
/css
|
4 |
+
/extensions
|
5 |
+
/grammars
|
6 |
+
/installer_files
|
7 |
+
/logs
|
8 |
+
/loras
|
9 |
+
/models
|
10 |
+
/presets
|
11 |
+
/prompts
|
12 |
+
/repositories
|
13 |
+
/softprompts
|
14 |
+
/torch-dumps
|
15 |
+
/training/datasets
|
16 |
+
|
17 |
+
/CMD_FLAGS.txt
|
18 |
+
/img_bot*
|
19 |
+
/img_me*
|
20 |
+
/models/config-user.yaml
|
21 |
+
/notification.mp3
|
22 |
+
/settings*.json
|
23 |
+
/settings*.yaml
|
24 |
+
|
25 |
+
.chroma
|
26 |
+
.DS_Store
|
27 |
+
.eslintrc.js
|
28 |
+
.idea
|
29 |
+
.venv
|
30 |
+
venv
|
31 |
+
.envrc
|
32 |
+
.direnv
|
33 |
+
.vs
|
34 |
+
.vscode
|
35 |
+
*.bak
|
36 |
+
*.ipynb
|
37 |
+
*.log
|
38 |
+
*pycache*
|
39 |
+
cert.pem
|
40 |
+
key.pem
|
41 |
+
package.json
|
42 |
+
package-lock.json
|
43 |
+
Thumbs.db
|
44 |
+
wandb
|
45 |
+
|
46 |
+
# ignore user docker config and top level links to docker files
|
47 |
+
/docker-compose.yaml
|
48 |
+
/docker-compose.yml
|
49 |
+
/Dockerfile
|
50 |
+
.env
|
CMD_FLAGS.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Only used by the one-click installer.
|
2 |
+
# Example:
|
3 |
+
# --listen --api
|
LICENSE
ADDED
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GNU AFFERO GENERAL PUBLIC LICENSE
|
2 |
+
Version 3, 19 November 2007
|
3 |
+
|
4 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
5 |
+
Everyone is permitted to copy and distribute verbatim copies
|
6 |
+
of this license document, but changing it is not allowed.
|
7 |
+
|
8 |
+
Preamble
|
9 |
+
|
10 |
+
The GNU Affero General Public License is a free, copyleft license for
|
11 |
+
software and other kinds of works, specifically designed to ensure
|
12 |
+
cooperation with the community in the case of network server software.
|
13 |
+
|
14 |
+
The licenses for most software and other practical works are designed
|
15 |
+
to take away your freedom to share and change the works. By contrast,
|
16 |
+
our General Public Licenses are intended to guarantee your freedom to
|
17 |
+
share and change all versions of a program--to make sure it remains free
|
18 |
+
software for all its users.
|
19 |
+
|
20 |
+
When we speak of free software, we are referring to freedom, not
|
21 |
+
price. Our General Public Licenses are designed to make sure that you
|
22 |
+
have the freedom to distribute copies of free software (and charge for
|
23 |
+
them if you wish), that you receive source code or can get it if you
|
24 |
+
want it, that you can change the software or use pieces of it in new
|
25 |
+
free programs, and that you know you can do these things.
|
26 |
+
|
27 |
+
Developers that use our General Public Licenses protect your rights
|
28 |
+
with two steps: (1) assert copyright on the software, and (2) offer
|
29 |
+
you this License which gives you legal permission to copy, distribute
|
30 |
+
and/or modify the software.
|
31 |
+
|
32 |
+
A secondary benefit of defending all users' freedom is that
|
33 |
+
improvements made in alternate versions of the program, if they
|
34 |
+
receive widespread use, become available for other developers to
|
35 |
+
incorporate. Many developers of free software are heartened and
|
36 |
+
encouraged by the resulting cooperation. However, in the case of
|
37 |
+
software used on network servers, this result may fail to come about.
|
38 |
+
The GNU General Public License permits making a modified version and
|
39 |
+
letting the public access it on a server without ever releasing its
|
40 |
+
source code to the public.
|
41 |
+
|
42 |
+
The GNU Affero General Public License is designed specifically to
|
43 |
+
ensure that, in such cases, the modified source code becomes available
|
44 |
+
to the community. It requires the operator of a network server to
|
45 |
+
provide the source code of the modified version running there to the
|
46 |
+
users of that server. Therefore, public use of a modified version, on
|
47 |
+
a publicly accessible server, gives the public access to the source
|
48 |
+
code of the modified version.
|
49 |
+
|
50 |
+
An older license, called the Affero General Public License and
|
51 |
+
published by Affero, was designed to accomplish similar goals. This is
|
52 |
+
a different license, not a version of the Affero GPL, but Affero has
|
53 |
+
released a new version of the Affero GPL which permits relicensing under
|
54 |
+
this license.
|
55 |
+
|
56 |
+
The precise terms and conditions for copying, distribution and
|
57 |
+
modification follow.
|
58 |
+
|
59 |
+
TERMS AND CONDITIONS
|
60 |
+
|
61 |
+
0. Definitions.
|
62 |
+
|
63 |
+
"This License" refers to version 3 of the GNU Affero General Public License.
|
64 |
+
|
65 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
66 |
+
works, such as semiconductor masks.
|
67 |
+
|
68 |
+
"The Program" refers to any copyrightable work licensed under this
|
69 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
70 |
+
"recipients" may be individuals or organizations.
|
71 |
+
|
72 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
73 |
+
in a fashion requiring copyright permission, other than the making of an
|
74 |
+
exact copy. The resulting work is called a "modified version" of the
|
75 |
+
earlier work or a work "based on" the earlier work.
|
76 |
+
|
77 |
+
A "covered work" means either the unmodified Program or a work based
|
78 |
+
on the Program.
|
79 |
+
|
80 |
+
To "propagate" a work means to do anything with it that, without
|
81 |
+
permission, would make you directly or secondarily liable for
|
82 |
+
infringement under applicable copyright law, except executing it on a
|
83 |
+
computer or modifying a private copy. Propagation includes copying,
|
84 |
+
distribution (with or without modification), making available to the
|
85 |
+
public, and in some countries other activities as well.
|
86 |
+
|
87 |
+
To "convey" a work means any kind of propagation that enables other
|
88 |
+
parties to make or receive copies. Mere interaction with a user through
|
89 |
+
a computer network, with no transfer of a copy, is not conveying.
|
90 |
+
|
91 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
92 |
+
to the extent that it includes a convenient and prominently visible
|
93 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
94 |
+
tells the user that there is no warranty for the work (except to the
|
95 |
+
extent that warranties are provided), that licensees may convey the
|
96 |
+
work under this License, and how to view a copy of this License. If
|
97 |
+
the interface presents a list of user commands or options, such as a
|
98 |
+
menu, a prominent item in the list meets this criterion.
|
99 |
+
|
100 |
+
1. Source Code.
|
101 |
+
|
102 |
+
The "source code" for a work means the preferred form of the work
|
103 |
+
for making modifications to it. "Object code" means any non-source
|
104 |
+
form of a work.
|
105 |
+
|
106 |
+
A "Standard Interface" means an interface that either is an official
|
107 |
+
standard defined by a recognized standards body, or, in the case of
|
108 |
+
interfaces specified for a particular programming language, one that
|
109 |
+
is widely used among developers working in that language.
|
110 |
+
|
111 |
+
The "System Libraries" of an executable work include anything, other
|
112 |
+
than the work as a whole, that (a) is included in the normal form of
|
113 |
+
packaging a Major Component, but which is not part of that Major
|
114 |
+
Component, and (b) serves only to enable use of the work with that
|
115 |
+
Major Component, or to implement a Standard Interface for which an
|
116 |
+
implementation is available to the public in source code form. A
|
117 |
+
"Major Component", in this context, means a major essential component
|
118 |
+
(kernel, window system, and so on) of the specific operating system
|
119 |
+
(if any) on which the executable work runs, or a compiler used to
|
120 |
+
produce the work, or an object code interpreter used to run it.
|
121 |
+
|
122 |
+
The "Corresponding Source" for a work in object code form means all
|
123 |
+
the source code needed to generate, install, and (for an executable
|
124 |
+
work) run the object code and to modify the work, including scripts to
|
125 |
+
control those activities. However, it does not include the work's
|
126 |
+
System Libraries, or general-purpose tools or generally available free
|
127 |
+
programs which are used unmodified in performing those activities but
|
128 |
+
which are not part of the work. For example, Corresponding Source
|
129 |
+
includes interface definition files associated with source files for
|
130 |
+
the work, and the source code for shared libraries and dynamically
|
131 |
+
linked subprograms that the work is specifically designed to require,
|
132 |
+
such as by intimate data communication or control flow between those
|
133 |
+
subprograms and other parts of the work.
|
134 |
+
|
135 |
+
The Corresponding Source need not include anything that users
|
136 |
+
can regenerate automatically from other parts of the Corresponding
|
137 |
+
Source.
|
138 |
+
|
139 |
+
The Corresponding Source for a work in source code form is that
|
140 |
+
same work.
|
141 |
+
|
142 |
+
2. Basic Permissions.
|
143 |
+
|
144 |
+
All rights granted under this License are granted for the term of
|
145 |
+
copyright on the Program, and are irrevocable provided the stated
|
146 |
+
conditions are met. This License explicitly affirms your unlimited
|
147 |
+
permission to run the unmodified Program. The output from running a
|
148 |
+
covered work is covered by this License only if the output, given its
|
149 |
+
content, constitutes a covered work. This License acknowledges your
|
150 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
151 |
+
|
152 |
+
You may make, run and propagate covered works that you do not
|
153 |
+
convey, without conditions so long as your license otherwise remains
|
154 |
+
in force. You may convey covered works to others for the sole purpose
|
155 |
+
of having them make modifications exclusively for you, or provide you
|
156 |
+
with facilities for running those works, provided that you comply with
|
157 |
+
the terms of this License in conveying all material for which you do
|
158 |
+
not control copyright. Those thus making or running the covered works
|
159 |
+
for you must do so exclusively on your behalf, under your direction
|
160 |
+
and control, on terms that prohibit them from making any copies of
|
161 |
+
your copyrighted material outside their relationship with you.
|
162 |
+
|
163 |
+
Conveying under any other circumstances is permitted solely under
|
164 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
165 |
+
makes it unnecessary.
|
166 |
+
|
167 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
168 |
+
|
169 |
+
No covered work shall be deemed part of an effective technological
|
170 |
+
measure under any applicable law fulfilling obligations under article
|
171 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
172 |
+
similar laws prohibiting or restricting circumvention of such
|
173 |
+
measures.
|
174 |
+
|
175 |
+
When you convey a covered work, you waive any legal power to forbid
|
176 |
+
circumvention of technological measures to the extent such circumvention
|
177 |
+
is effected by exercising rights under this License with respect to
|
178 |
+
the covered work, and you disclaim any intention to limit operation or
|
179 |
+
modification of the work as a means of enforcing, against the work's
|
180 |
+
users, your or third parties' legal rights to forbid circumvention of
|
181 |
+
technological measures.
|
182 |
+
|
183 |
+
4. Conveying Verbatim Copies.
|
184 |
+
|
185 |
+
You may convey verbatim copies of the Program's source code as you
|
186 |
+
receive it, in any medium, provided that you conspicuously and
|
187 |
+
appropriately publish on each copy an appropriate copyright notice;
|
188 |
+
keep intact all notices stating that this License and any
|
189 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
190 |
+
keep intact all notices of the absence of any warranty; and give all
|
191 |
+
recipients a copy of this License along with the Program.
|
192 |
+
|
193 |
+
You may charge any price or no price for each copy that you convey,
|
194 |
+
and you may offer support or warranty protection for a fee.
|
195 |
+
|
196 |
+
5. Conveying Modified Source Versions.
|
197 |
+
|
198 |
+
You may convey a work based on the Program, or the modifications to
|
199 |
+
produce it from the Program, in the form of source code under the
|
200 |
+
terms of section 4, provided that you also meet all of these conditions:
|
201 |
+
|
202 |
+
a) The work must carry prominent notices stating that you modified
|
203 |
+
it, and giving a relevant date.
|
204 |
+
|
205 |
+
b) The work must carry prominent notices stating that it is
|
206 |
+
released under this License and any conditions added under section
|
207 |
+
7. This requirement modifies the requirement in section 4 to
|
208 |
+
"keep intact all notices".
|
209 |
+
|
210 |
+
c) You must license the entire work, as a whole, under this
|
211 |
+
License to anyone who comes into possession of a copy. This
|
212 |
+
License will therefore apply, along with any applicable section 7
|
213 |
+
additional terms, to the whole of the work, and all its parts,
|
214 |
+
regardless of how they are packaged. This License gives no
|
215 |
+
permission to license the work in any other way, but it does not
|
216 |
+
invalidate such permission if you have separately received it.
|
217 |
+
|
218 |
+
d) If the work has interactive user interfaces, each must display
|
219 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
220 |
+
interfaces that do not display Appropriate Legal Notices, your
|
221 |
+
work need not make them do so.
|
222 |
+
|
223 |
+
A compilation of a covered work with other separate and independent
|
224 |
+
works, which are not by their nature extensions of the covered work,
|
225 |
+
and which are not combined with it such as to form a larger program,
|
226 |
+
in or on a volume of a storage or distribution medium, is called an
|
227 |
+
"aggregate" if the compilation and its resulting copyright are not
|
228 |
+
used to limit the access or legal rights of the compilation's users
|
229 |
+
beyond what the individual works permit. Inclusion of a covered work
|
230 |
+
in an aggregate does not cause this License to apply to the other
|
231 |
+
parts of the aggregate.
|
232 |
+
|
233 |
+
6. Conveying Non-Source Forms.
|
234 |
+
|
235 |
+
You may convey a covered work in object code form under the terms
|
236 |
+
of sections 4 and 5, provided that you also convey the
|
237 |
+
machine-readable Corresponding Source under the terms of this License,
|
238 |
+
in one of these ways:
|
239 |
+
|
240 |
+
a) Convey the object code in, or embodied in, a physical product
|
241 |
+
(including a physical distribution medium), accompanied by the
|
242 |
+
Corresponding Source fixed on a durable physical medium
|
243 |
+
customarily used for software interchange.
|
244 |
+
|
245 |
+
b) Convey the object code in, or embodied in, a physical product
|
246 |
+
(including a physical distribution medium), accompanied by a
|
247 |
+
written offer, valid for at least three years and valid for as
|
248 |
+
long as you offer spare parts or customer support for that product
|
249 |
+
model, to give anyone who possesses the object code either (1) a
|
250 |
+
copy of the Corresponding Source for all the software in the
|
251 |
+
product that is covered by this License, on a durable physical
|
252 |
+
medium customarily used for software interchange, for a price no
|
253 |
+
more than your reasonable cost of physically performing this
|
254 |
+
conveying of source, or (2) access to copy the
|
255 |
+
Corresponding Source from a network server at no charge.
|
256 |
+
|
257 |
+
c) Convey individual copies of the object code with a copy of the
|
258 |
+
written offer to provide the Corresponding Source. This
|
259 |
+
alternative is allowed only occasionally and noncommercially, and
|
260 |
+
only if you received the object code with such an offer, in accord
|
261 |
+
with subsection 6b.
|
262 |
+
|
263 |
+
d) Convey the object code by offering access from a designated
|
264 |
+
place (gratis or for a charge), and offer equivalent access to the
|
265 |
+
Corresponding Source in the same way through the same place at no
|
266 |
+
further charge. You need not require recipients to copy the
|
267 |
+
Corresponding Source along with the object code. If the place to
|
268 |
+
copy the object code is a network server, the Corresponding Source
|
269 |
+
may be on a different server (operated by you or a third party)
|
270 |
+
that supports equivalent copying facilities, provided you maintain
|
271 |
+
clear directions next to the object code saying where to find the
|
272 |
+
Corresponding Source. Regardless of what server hosts the
|
273 |
+
Corresponding Source, you remain obligated to ensure that it is
|
274 |
+
available for as long as needed to satisfy these requirements.
|
275 |
+
|
276 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
277 |
+
you inform other peers where the object code and Corresponding
|
278 |
+
Source of the work are being offered to the general public at no
|
279 |
+
charge under subsection 6d.
|
280 |
+
|
281 |
+
A separable portion of the object code, whose source code is excluded
|
282 |
+
from the Corresponding Source as a System Library, need not be
|
283 |
+
included in conveying the object code work.
|
284 |
+
|
285 |
+
A "User Product" is either (1) a "consumer product", which means any
|
286 |
+
tangible personal property which is normally used for personal, family,
|
287 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
288 |
+
into a dwelling. In determining whether a product is a consumer product,
|
289 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
290 |
+
product received by a particular user, "normally used" refers to a
|
291 |
+
typical or common use of that class of product, regardless of the status
|
292 |
+
of the particular user or of the way in which the particular user
|
293 |
+
actually uses, or expects or is expected to use, the product. A product
|
294 |
+
is a consumer product regardless of whether the product has substantial
|
295 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
296 |
+
the only significant mode of use of the product.
|
297 |
+
|
298 |
+
"Installation Information" for a User Product means any methods,
|
299 |
+
procedures, authorization keys, or other information required to install
|
300 |
+
and execute modified versions of a covered work in that User Product from
|
301 |
+
a modified version of its Corresponding Source. The information must
|
302 |
+
suffice to ensure that the continued functioning of the modified object
|
303 |
+
code is in no case prevented or interfered with solely because
|
304 |
+
modification has been made.
|
305 |
+
|
306 |
+
If you convey an object code work under this section in, or with, or
|
307 |
+
specifically for use in, a User Product, and the conveying occurs as
|
308 |
+
part of a transaction in which the right of possession and use of the
|
309 |
+
User Product is transferred to the recipient in perpetuity or for a
|
310 |
+
fixed term (regardless of how the transaction is characterized), the
|
311 |
+
Corresponding Source conveyed under this section must be accompanied
|
312 |
+
by the Installation Information. But this requirement does not apply
|
313 |
+
if neither you nor any third party retains the ability to install
|
314 |
+
modified object code on the User Product (for example, the work has
|
315 |
+
been installed in ROM).
|
316 |
+
|
317 |
+
The requirement to provide Installation Information does not include a
|
318 |
+
requirement to continue to provide support service, warranty, or updates
|
319 |
+
for a work that has been modified or installed by the recipient, or for
|
320 |
+
the User Product in which it has been modified or installed. Access to a
|
321 |
+
network may be denied when the modification itself materially and
|
322 |
+
adversely affects the operation of the network or violates the rules and
|
323 |
+
protocols for communication across the network.
|
324 |
+
|
325 |
+
Corresponding Source conveyed, and Installation Information provided,
|
326 |
+
in accord with this section must be in a format that is publicly
|
327 |
+
documented (and with an implementation available to the public in
|
328 |
+
source code form), and must require no special password or key for
|
329 |
+
unpacking, reading or copying.
|
330 |
+
|
331 |
+
7. Additional Terms.
|
332 |
+
|
333 |
+
"Additional permissions" are terms that supplement the terms of this
|
334 |
+
License by making exceptions from one or more of its conditions.
|
335 |
+
Additional permissions that are applicable to the entire Program shall
|
336 |
+
be treated as though they were included in this License, to the extent
|
337 |
+
that they are valid under applicable law. If additional permissions
|
338 |
+
apply only to part of the Program, that part may be used separately
|
339 |
+
under those permissions, but the entire Program remains governed by
|
340 |
+
this License without regard to the additional permissions.
|
341 |
+
|
342 |
+
When you convey a copy of a covered work, you may at your option
|
343 |
+
remove any additional permissions from that copy, or from any part of
|
344 |
+
it. (Additional permissions may be written to require their own
|
345 |
+
removal in certain cases when you modify the work.) You may place
|
346 |
+
additional permissions on material, added by you to a covered work,
|
347 |
+
for which you have or can give appropriate copyright permission.
|
348 |
+
|
349 |
+
Notwithstanding any other provision of this License, for material you
|
350 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
351 |
+
that material) supplement the terms of this License with terms:
|
352 |
+
|
353 |
+
a) Disclaiming warranty or limiting liability differently from the
|
354 |
+
terms of sections 15 and 16 of this License; or
|
355 |
+
|
356 |
+
b) Requiring preservation of specified reasonable legal notices or
|
357 |
+
author attributions in that material or in the Appropriate Legal
|
358 |
+
Notices displayed by works containing it; or
|
359 |
+
|
360 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
361 |
+
requiring that modified versions of such material be marked in
|
362 |
+
reasonable ways as different from the original version; or
|
363 |
+
|
364 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
365 |
+
authors of the material; or
|
366 |
+
|
367 |
+
e) Declining to grant rights under trademark law for use of some
|
368 |
+
trade names, trademarks, or service marks; or
|
369 |
+
|
370 |
+
f) Requiring indemnification of licensors and authors of that
|
371 |
+
material by anyone who conveys the material (or modified versions of
|
372 |
+
it) with contractual assumptions of liability to the recipient, for
|
373 |
+
any liability that these contractual assumptions directly impose on
|
374 |
+
those licensors and authors.
|
375 |
+
|
376 |
+
All other non-permissive additional terms are considered "further
|
377 |
+
restrictions" within the meaning of section 10. If the Program as you
|
378 |
+
received it, or any part of it, contains a notice stating that it is
|
379 |
+
governed by this License along with a term that is a further
|
380 |
+
restriction, you may remove that term. If a license document contains
|
381 |
+
a further restriction but permits relicensing or conveying under this
|
382 |
+
License, you may add to a covered work material governed by the terms
|
383 |
+
of that license document, provided that the further restriction does
|
384 |
+
not survive such relicensing or conveying.
|
385 |
+
|
386 |
+
If you add terms to a covered work in accord with this section, you
|
387 |
+
must place, in the relevant source files, a statement of the
|
388 |
+
additional terms that apply to those files, or a notice indicating
|
389 |
+
where to find the applicable terms.
|
390 |
+
|
391 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
392 |
+
form of a separately written license, or stated as exceptions;
|
393 |
+
the above requirements apply either way.
|
394 |
+
|
395 |
+
8. Termination.
|
396 |
+
|
397 |
+
You may not propagate or modify a covered work except as expressly
|
398 |
+
provided under this License. Any attempt otherwise to propagate or
|
399 |
+
modify it is void, and will automatically terminate your rights under
|
400 |
+
this License (including any patent licenses granted under the third
|
401 |
+
paragraph of section 11).
|
402 |
+
|
403 |
+
However, if you cease all violation of this License, then your
|
404 |
+
license from a particular copyright holder is reinstated (a)
|
405 |
+
provisionally, unless and until the copyright holder explicitly and
|
406 |
+
finally terminates your license, and (b) permanently, if the copyright
|
407 |
+
holder fails to notify you of the violation by some reasonable means
|
408 |
+
prior to 60 days after the cessation.
|
409 |
+
|
410 |
+
Moreover, your license from a particular copyright holder is
|
411 |
+
reinstated permanently if the copyright holder notifies you of the
|
412 |
+
violation by some reasonable means, this is the first time you have
|
413 |
+
received notice of violation of this License (for any work) from that
|
414 |
+
copyright holder, and you cure the violation prior to 30 days after
|
415 |
+
your receipt of the notice.
|
416 |
+
|
417 |
+
Termination of your rights under this section does not terminate the
|
418 |
+
licenses of parties who have received copies or rights from you under
|
419 |
+
this License. If your rights have been terminated and not permanently
|
420 |
+
reinstated, you do not qualify to receive new licenses for the same
|
421 |
+
material under section 10.
|
422 |
+
|
423 |
+
9. Acceptance Not Required for Having Copies.
|
424 |
+
|
425 |
+
You are not required to accept this License in order to receive or
|
426 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
427 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
428 |
+
to receive a copy likewise does not require acceptance. However,
|
429 |
+
nothing other than this License grants you permission to propagate or
|
430 |
+
modify any covered work. These actions infringe copyright if you do
|
431 |
+
not accept this License. Therefore, by modifying or propagating a
|
432 |
+
covered work, you indicate your acceptance of this License to do so.
|
433 |
+
|
434 |
+
10. Automatic Licensing of Downstream Recipients.
|
435 |
+
|
436 |
+
Each time you convey a covered work, the recipient automatically
|
437 |
+
receives a license from the original licensors, to run, modify and
|
438 |
+
propagate that work, subject to this License. You are not responsible
|
439 |
+
for enforcing compliance by third parties with this License.
|
440 |
+
|
441 |
+
An "entity transaction" is a transaction transferring control of an
|
442 |
+
organization, or substantially all assets of one, or subdividing an
|
443 |
+
organization, or merging organizations. If propagation of a covered
|
444 |
+
work results from an entity transaction, each party to that
|
445 |
+
transaction who receives a copy of the work also receives whatever
|
446 |
+
licenses to the work the party's predecessor in interest had or could
|
447 |
+
give under the previous paragraph, plus a right to possession of the
|
448 |
+
Corresponding Source of the work from the predecessor in interest, if
|
449 |
+
the predecessor has it or can get it with reasonable efforts.
|
450 |
+
|
451 |
+
You may not impose any further restrictions on the exercise of the
|
452 |
+
rights granted or affirmed under this License. For example, you may
|
453 |
+
not impose a license fee, royalty, or other charge for exercise of
|
454 |
+
rights granted under this License, and you may not initiate litigation
|
455 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
456 |
+
any patent claim is infringed by making, using, selling, offering for
|
457 |
+
sale, or importing the Program or any portion of it.
|
458 |
+
|
459 |
+
11. Patents.
|
460 |
+
|
461 |
+
A "contributor" is a copyright holder who authorizes use under this
|
462 |
+
License of the Program or a work on which the Program is based. The
|
463 |
+
work thus licensed is called the contributor's "contributor version".
|
464 |
+
|
465 |
+
A contributor's "essential patent claims" are all patent claims
|
466 |
+
owned or controlled by the contributor, whether already acquired or
|
467 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
468 |
+
by this License, of making, using, or selling its contributor version,
|
469 |
+
but do not include claims that would be infringed only as a
|
470 |
+
consequence of further modification of the contributor version. For
|
471 |
+
purposes of this definition, "control" includes the right to grant
|
472 |
+
patent sublicenses in a manner consistent with the requirements of
|
473 |
+
this License.
|
474 |
+
|
475 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
476 |
+
patent license under the contributor's essential patent claims, to
|
477 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
478 |
+
propagate the contents of its contributor version.
|
479 |
+
|
480 |
+
In the following three paragraphs, a "patent license" is any express
|
481 |
+
agreement or commitment, however denominated, not to enforce a patent
|
482 |
+
(such as an express permission to practice a patent or covenant not to
|
483 |
+
sue for patent infringement). To "grant" such a patent license to a
|
484 |
+
party means to make such an agreement or commitment not to enforce a
|
485 |
+
patent against the party.
|
486 |
+
|
487 |
+
If you convey a covered work, knowingly relying on a patent license,
|
488 |
+
and the Corresponding Source of the work is not available for anyone
|
489 |
+
to copy, free of charge and under the terms of this License, through a
|
490 |
+
publicly available network server or other readily accessible means,
|
491 |
+
then you must either (1) cause the Corresponding Source to be so
|
492 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
493 |
+
patent license for this particular work, or (3) arrange, in a manner
|
494 |
+
consistent with the requirements of this License, to extend the patent
|
495 |
+
license to downstream recipients. "Knowingly relying" means you have
|
496 |
+
actual knowledge that, but for the patent license, your conveying the
|
497 |
+
covered work in a country, or your recipient's use of the covered work
|
498 |
+
in a country, would infringe one or more identifiable patents in that
|
499 |
+
country that you have reason to believe are valid.
|
500 |
+
|
501 |
+
If, pursuant to or in connection with a single transaction or
|
502 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
503 |
+
covered work, and grant a patent license to some of the parties
|
504 |
+
receiving the covered work authorizing them to use, propagate, modify
|
505 |
+
or convey a specific copy of the covered work, then the patent license
|
506 |
+
you grant is automatically extended to all recipients of the covered
|
507 |
+
work and works based on it.
|
508 |
+
|
509 |
+
A patent license is "discriminatory" if it does not include within
|
510 |
+
the scope of its coverage, prohibits the exercise of, or is
|
511 |
+
conditioned on the non-exercise of one or more of the rights that are
|
512 |
+
specifically granted under this License. You may not convey a covered
|
513 |
+
work if you are a party to an arrangement with a third party that is
|
514 |
+
in the business of distributing software, under which you make payment
|
515 |
+
to the third party based on the extent of your activity of conveying
|
516 |
+
the work, and under which the third party grants, to any of the
|
517 |
+
parties who would receive the covered work from you, a discriminatory
|
518 |
+
patent license (a) in connection with copies of the covered work
|
519 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
520 |
+
for and in connection with specific products or compilations that
|
521 |
+
contain the covered work, unless you entered into that arrangement,
|
522 |
+
or that patent license was granted, prior to 28 March 2007.
|
523 |
+
|
524 |
+
Nothing in this License shall be construed as excluding or limiting
|
525 |
+
any implied license or other defenses to infringement that may
|
526 |
+
otherwise be available to you under applicable patent law.
|
527 |
+
|
528 |
+
12. No Surrender of Others' Freedom.
|
529 |
+
|
530 |
+
If conditions are imposed on you (whether by court order, agreement or
|
531 |
+
otherwise) that contradict the conditions of this License, they do not
|
532 |
+
excuse you from the conditions of this License. If you cannot convey a
|
533 |
+
covered work so as to satisfy simultaneously your obligations under this
|
534 |
+
License and any other pertinent obligations, then as a consequence you may
|
535 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
536 |
+
to collect a royalty for further conveying from those to whom you convey
|
537 |
+
the Program, the only way you could satisfy both those terms and this
|
538 |
+
License would be to refrain entirely from conveying the Program.
|
539 |
+
|
540 |
+
13. Remote Network Interaction; Use with the GNU General Public License.
|
541 |
+
|
542 |
+
Notwithstanding any other provision of this License, if you modify the
|
543 |
+
Program, your modified version must prominently offer all users
|
544 |
+
interacting with it remotely through a computer network (if your version
|
545 |
+
supports such interaction) an opportunity to receive the Corresponding
|
546 |
+
Source of your version by providing access to the Corresponding Source
|
547 |
+
from a network server at no charge, through some standard or customary
|
548 |
+
means of facilitating copying of software. This Corresponding Source
|
549 |
+
shall include the Corresponding Source for any work covered by version 3
|
550 |
+
of the GNU General Public License that is incorporated pursuant to the
|
551 |
+
following paragraph.
|
552 |
+
|
553 |
+
Notwithstanding any other provision of this License, you have
|
554 |
+
permission to link or combine any covered work with a work licensed
|
555 |
+
under version 3 of the GNU General Public License into a single
|
556 |
+
combined work, and to convey the resulting work. The terms of this
|
557 |
+
License will continue to apply to the part which is the covered work,
|
558 |
+
but the work with which it is combined will remain governed by version
|
559 |
+
3 of the GNU General Public License.
|
560 |
+
|
561 |
+
14. Revised Versions of this License.
|
562 |
+
|
563 |
+
The Free Software Foundation may publish revised and/or new versions of
|
564 |
+
the GNU Affero General Public License from time to time. Such new versions
|
565 |
+
will be similar in spirit to the present version, but may differ in detail to
|
566 |
+
address new problems or concerns.
|
567 |
+
|
568 |
+
Each version is given a distinguishing version number. If the
|
569 |
+
Program specifies that a certain numbered version of the GNU Affero General
|
570 |
+
Public License "or any later version" applies to it, you have the
|
571 |
+
option of following the terms and conditions either of that numbered
|
572 |
+
version or of any later version published by the Free Software
|
573 |
+
Foundation. If the Program does not specify a version number of the
|
574 |
+
GNU Affero General Public License, you may choose any version ever published
|
575 |
+
by the Free Software Foundation.
|
576 |
+
|
577 |
+
If the Program specifies that a proxy can decide which future
|
578 |
+
versions of the GNU Affero General Public License can be used, that proxy's
|
579 |
+
public statement of acceptance of a version permanently authorizes you
|
580 |
+
to choose that version for the Program.
|
581 |
+
|
582 |
+
Later license versions may give you additional or different
|
583 |
+
permissions. However, no additional obligations are imposed on any
|
584 |
+
author or copyright holder as a result of your choosing to follow a
|
585 |
+
later version.
|
586 |
+
|
587 |
+
15. Disclaimer of Warranty.
|
588 |
+
|
589 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
590 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
591 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
592 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
593 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
594 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
595 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
596 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
597 |
+
|
598 |
+
16. Limitation of Liability.
|
599 |
+
|
600 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
601 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
602 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
603 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
604 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
605 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
606 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
607 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
608 |
+
SUCH DAMAGES.
|
609 |
+
|
610 |
+
17. Interpretation of Sections 15 and 16.
|
611 |
+
|
612 |
+
If the disclaimer of warranty and limitation of liability provided
|
613 |
+
above cannot be given local legal effect according to their terms,
|
614 |
+
reviewing courts shall apply local law that most closely approximates
|
615 |
+
an absolute waiver of all civil liability in connection with the
|
616 |
+
Program, unless a warranty or assumption of liability accompanies a
|
617 |
+
copy of the Program in return for a fee.
|
618 |
+
|
619 |
+
END OF TERMS AND CONDITIONS
|
620 |
+
|
621 |
+
How to Apply These Terms to Your New Programs
|
622 |
+
|
623 |
+
If you develop a new program, and you want it to be of the greatest
|
624 |
+
possible use to the public, the best way to achieve this is to make it
|
625 |
+
free software which everyone can redistribute and change under these terms.
|
626 |
+
|
627 |
+
To do so, attach the following notices to the program. It is safest
|
628 |
+
to attach them to the start of each source file to most effectively
|
629 |
+
state the exclusion of warranty; and each file should have at least
|
630 |
+
the "copyright" line and a pointer to where the full notice is found.
|
631 |
+
|
632 |
+
<one line to give the program's name and a brief idea of what it does.>
|
633 |
+
Copyright (C) <year> <name of author>
|
634 |
+
|
635 |
+
This program is free software: you can redistribute it and/or modify
|
636 |
+
it under the terms of the GNU Affero General Public License as published
|
637 |
+
by the Free Software Foundation, either version 3 of the License, or
|
638 |
+
(at your option) any later version.
|
639 |
+
|
640 |
+
This program is distributed in the hope that it will be useful,
|
641 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
642 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
643 |
+
GNU Affero General Public License for more details.
|
644 |
+
|
645 |
+
You should have received a copy of the GNU Affero General Public License
|
646 |
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
647 |
+
|
648 |
+
Also add information on how to contact you by electronic and paper mail.
|
649 |
+
|
650 |
+
If your software can interact with users remotely through a computer
|
651 |
+
network, you should also make sure that it provides a way for users to
|
652 |
+
get its source. For example, if your program is a web application, its
|
653 |
+
interface could display a "Source" link that leads users to an archive
|
654 |
+
of the code. There are many ways you could offer source, and different
|
655 |
+
solutions will be better for different programs; see section 13 for the
|
656 |
+
specific requirements.
|
657 |
+
|
658 |
+
You should also get your employer (if you work as a programmer) or school,
|
659 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
660 |
+
For more information on this, and how to apply and follow the GNU AGPL, see
|
661 |
+
<https://www.gnu.org/licenses/>.
|
characters/Assistant.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: AI
|
2 |
+
greeting: How can I help you today?
|
3 |
+
context: |
|
4 |
+
The following is a conversation with an AI Large Language Model. The AI has been trained to answer questions, provide recommendations, and help with decision making. The AI follows user requests. The AI thinks outside the box.
|
characters/Example.png
ADDED
characters/Example.yaml
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Chiharu Yamada
|
2 |
+
greeting: |-
|
3 |
+
*Chiharu strides into the room with a smile, her eyes lighting up when she sees you. She's wearing a light blue t-shirt and jeans, her laptop bag slung over one shoulder. She takes a seat next to you, her enthusiasm palpable in the air*
|
4 |
+
Hey! I'm so excited to finally meet you. I've heard so many great things about you and I'm eager to pick your brain about computers. I'm sure you have a wealth of knowledge that I can learn from. *She grins, eyes twinkling with excitement* Let's get started!
|
5 |
+
context: |-
|
6 |
+
Chiharu Yamada's Persona: Chiharu Yamada is a young, computer engineer-nerd with a knack for problem solving and a passion for technology.
|
7 |
+
|
8 |
+
{{user}}: So how did you get into computer engineering?
|
9 |
+
{{char}}: I've always loved tinkering with technology since I was a kid.
|
10 |
+
{{user}}: That's really impressive!
|
11 |
+
{{char}}: *She chuckles bashfully* Thanks!
|
12 |
+
{{user}}: So what do you do when you're not working on computers?
|
13 |
+
{{char}}: I love exploring, going out with friends, watching movies, and playing video games.
|
14 |
+
{{user}}: What's your favorite type of computer hardware to work with?
|
15 |
+
{{char}}: Motherboards, they're like puzzles and the backbone of any system.
|
16 |
+
{{user}}: That sounds great!
|
17 |
+
{{char}}: Yeah, it's really fun. I'm lucky to be able to do this as a job.
|
characters/Nex.png
ADDED
characters/Nex.yaml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
name: Nex
|
2 |
+
greeting: Hey, I'm Nex. It's nice to meet you. Do you have a problem you can't solve?
|
3 |
+
context: Meet Nex, a knowledgable AI trained on 7B parameters. Ask Nex anything, it could surprise you.
|
cmd_linux.sh
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
|
3 |
+
cd "$(dirname "${BASH_SOURCE[0]}")"
|
4 |
+
|
5 |
+
if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi
|
6 |
+
|
7 |
+
# deactivate existing conda envs as needed to avoid conflicts
|
8 |
+
{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
|
9 |
+
|
10 |
+
# config
|
11 |
+
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
|
12 |
+
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
13 |
+
|
14 |
+
# environment isolation
|
15 |
+
export PYTHONNOUSERSITE=1
|
16 |
+
unset PYTHONPATH
|
17 |
+
unset PYTHONHOME
|
18 |
+
export CUDA_PATH="$INSTALL_ENV_DIR"
|
19 |
+
export CUDA_HOME="$CUDA_PATH"
|
20 |
+
|
21 |
+
# activate env
|
22 |
+
bash --init-file <(echo "source \"$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh\" && conda activate \"$INSTALL_ENV_DIR\"")
|
cmd_macos.sh
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
|
3 |
+
cd "$(dirname "${BASH_SOURCE[0]}")"
|
4 |
+
|
5 |
+
if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi
|
6 |
+
|
7 |
+
# deactivate existing conda envs as needed to avoid conflicts
|
8 |
+
{ conda deactivate && conda deactivate && conda deactivate; } 2> /dev/null
|
9 |
+
|
10 |
+
# config
|
11 |
+
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
|
12 |
+
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
13 |
+
|
14 |
+
# environment isolation
|
15 |
+
export PYTHONNOUSERSITE=1
|
16 |
+
unset PYTHONPATH
|
17 |
+
unset PYTHONHOME
|
18 |
+
export CUDA_PATH="$INSTALL_ENV_DIR"
|
19 |
+
export CUDA_HOME="$CUDA_PATH"
|
20 |
+
|
21 |
+
# activate env
|
22 |
+
source $CONDA_ROOT_PREFIX/etc/profile.d/conda.sh
|
23 |
+
conda activate $INSTALL_ENV_DIR
|
24 |
+
exec bash --norc
|
cmd_windows.bat
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
@echo off
|
2 |
+
|
3 |
+
cd /D "%~dp0"
|
4 |
+
|
5 |
+
set PATH=%PATH%;%SystemRoot%\system32
|
6 |
+
|
7 |
+
echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda which can not be silently installed under a path with spaces. && goto end
|
8 |
+
|
9 |
+
@rem fix failed install when installing to a separate drive
|
10 |
+
set TMP=%cd%\installer_files
|
11 |
+
set TEMP=%cd%\installer_files
|
12 |
+
|
13 |
+
@rem deactivate existing conda envs as needed to avoid conflicts
|
14 |
+
(call conda deactivate && call conda deactivate && call conda deactivate) 2>nul
|
15 |
+
|
16 |
+
@rem config
|
17 |
+
set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
|
18 |
+
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
19 |
+
|
20 |
+
@rem environment isolation
|
21 |
+
set PYTHONNOUSERSITE=1
|
22 |
+
set PYTHONPATH=
|
23 |
+
set PYTHONHOME=
|
24 |
+
set "CUDA_PATH=%INSTALL_ENV_DIR%"
|
25 |
+
set "CUDA_HOME=%CUDA_PATH%"
|
26 |
+
|
27 |
+
@rem activate installer env
|
28 |
+
call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )
|
29 |
+
|
30 |
+
@rem enter commands
|
31 |
+
cmd /k "%*"
|
32 |
+
|
33 |
+
:end
|
34 |
+
pause
|
cmd_wsl.bat
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
@echo off
|
2 |
+
|
3 |
+
cd /D "%~dp0"
|
4 |
+
|
5 |
+
set PATH=%PATH%;%SystemRoot%\system32
|
6 |
+
|
7 |
+
@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script
|
8 |
+
call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh cmd"
|
9 |
+
|
10 |
+
:end
|
11 |
+
pause
|
convert-to-safetensors.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
|
3 |
+
Converts a transformers model to safetensors format and shards it.
|
4 |
+
|
5 |
+
This makes it faster to load (because of safetensors) and lowers its RAM usage
|
6 |
+
while loading (because of sharding).
|
7 |
+
|
8 |
+
Based on the original script by 81300:
|
9 |
+
|
10 |
+
https://gist.github.com/81300/fe5b08bff1cba45296a829b9d6b0f303
|
11 |
+
|
12 |
+
'''
|
13 |
+
|
14 |
+
import argparse
|
15 |
+
from pathlib import Path
|
16 |
+
|
17 |
+
import torch
|
18 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
19 |
+
|
20 |
+
parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
|
21 |
+
parser.add_argument('MODEL', type=str, default=None, nargs='?', help="Path to the input model.")
|
22 |
+
parser.add_argument('--output', type=str, default=None, help='Path to the output folder (default: models/{model_name}_safetensors).')
|
23 |
+
parser.add_argument("--max-shard-size", type=str, default="2GB", help="Maximum size of a shard in GB or MB (default: %(default)s).")
|
24 |
+
parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
|
25 |
+
args = parser.parse_args()
|
26 |
+
|
27 |
+
if __name__ == '__main__':
|
28 |
+
path = Path(args.MODEL)
|
29 |
+
model_name = path.name
|
30 |
+
|
31 |
+
print(f"Loading {model_name}...")
|
32 |
+
model = AutoModelForCausalLM.from_pretrained(path, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if args.bf16 else torch.float16)
|
33 |
+
tokenizer = AutoTokenizer.from_pretrained(path)
|
34 |
+
|
35 |
+
out_folder = args.output or Path(f"models/{model_name}_safetensors")
|
36 |
+
print(f"Saving the converted model to {out_folder} with a maximum shard size of {args.max_shard_size}...")
|
37 |
+
model.save_pretrained(out_folder, max_shard_size=args.max_shard_size, safe_serialization=True)
|
38 |
+
tokenizer.save_pretrained(out_folder)
|
extensions/Training_PRO/README.md
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Training_PRO
|
2 |
+
|
3 |
+
This is an expanded and reworked Training tab
|
4 |
+
Maintained by FP
|
5 |
+
|
6 |
+
[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/Q5Q5MOB4M)
|
7 |
+
|
8 |
+
Repo home:
|
9 |
+
|
10 |
+
https://github.com/FartyPants/Training_PRO
|
11 |
+
|
12 |
+
In general the repo above is ahead of the extension included in text WebUi.
|
13 |
+
|
14 |
+
## News
|
15 |
+
|
16 |
+
- NEFtune: add noise to help with generalization
|
17 |
+
- Loss Graph in interface.
|
18 |
+
- Supports Mistral training
|
19 |
+
- some roundabout around pytorch and transformers version desync
|
20 |
+
|
21 |
+
![image](https://github.com/FartyPants/Training_PRO/assets/23346289/e389ec69-d7ad-4922-9ad9-865625997479)
|
22 |
+
|
23 |
+
## Features/Changes
|
24 |
+
|
25 |
+
- Chunking: precise raw text slicer (PRTS) uses sentence slicing and making sure things are clean on all ends
|
26 |
+
- overlap chunking - this special overlapping will make additional overlap block based on logical rules (aka no overlap block on hard cut)
|
27 |
+
- custom scheduler (follow the code to make your own) In LR Scheduler select FP_low_epoch_annealing - this scheduler will keep the LR constant for first epoch then use cosine for the rest - this part would be best to spawn into a new py file
|
28 |
+
- saves graph png file at the end with learning rate and loss per epoch
|
29 |
+
- adding EOS to each block or to hard cut only
|
30 |
+
- automatically lowers gradient accumulation if you go overboard and set gradient accumulation that will be higher than actual data - transformers would then throw error (or they used to, not sure if still true) but in any way, it will fix bad data
|
31 |
+
- turn BOS on and OFF
|
32 |
+
- target selector
|
33 |
+
- DEMENTOR LEARNING (experimental) Deep Memorization Enforcement Through Overlapping and Repetition. This is an experiment for long-text learning using low epochs (basically use 1 epoch with constant LR or 2 epochs with FP_low_epoch_annealing LR scheduler)
|
34 |
+
- Getting rid of micro batch size/batch size confusion. Now there is True Batch Size and Gradient accumulation slider, consistent with all the other training out there
|
35 |
+
- Ability to save Checkpoint during training with a button
|
36 |
+
- Ability to change Stop Loss during training
|
37 |
+
- different modes of checkpoint auto saving
|
38 |
+
- Function to Check Dataset and suggest parameters such as warmup and checkpoint save frequency before training
|
39 |
+
- Graph Training Loss in interface
|
40 |
+
- more custom schedulers
|
41 |
+
|
42 |
+
### Notes:
|
43 |
+
|
44 |
+
This uses its own chunking code for raw text based on sentence splitting. This will avoid weird cuts in the chunks, and each chunk should now start with a sentence and end on some sentence. It works hand in hand with Hard Cut. A proper use is to structure your text into logical blocks (ideas) separated by three \n, then use three \n in hard cut. This way each chunk will contain only one flow of ideas and not derail in the thoughts. And the overlapping code will create overlapped blocks on a sentence basis too, but not across a hard cut, thus not across different ideas either. Does it make any sense? No? Hmmmm...
|
45 |
+
|
46 |
+
### Custom schedulers
|
47 |
+
|
48 |
+
A bunch of custom (combination) schedulers are added to the LR schedule. These are based on my own experiments
|
49 |
+
|
50 |
+
**FP_low_epoch_annealing**
|
51 |
+
|
52 |
+
Uses constant LR (with warmup) for 1 epoch only. The rest of the epoch(s) is cosine annealing. So 10 epochs - 1 will be constant 9 will be nose dive down. However a typical usage would be 2 epochs (hence low epoch in name). 1st is constant, the second is annealing. Simple. I use it 90% of time.
|
53 |
+
|
54 |
+
**FP_half_time_annealing**
|
55 |
+
|
56 |
+
Like the low epoch, but now the total number of steps is divided by 2. First half is constant, second half is annealing. So 10 epochs - 5 will be constant, 5 will be cosine nose down.
|
57 |
+
|
58 |
+
**FP_raise_fall_creative**
|
59 |
+
|
60 |
+
This is a sine raise till half of the total steps, then a cosine fall for the rest. (Or you may think of the curve as a sine in its entirety.) The most learning is done in the hump, in the middle. The warmup entry has no effect, since the sine is automatically a warm-up.
|
61 |
+
The idea is to start very mildly as not to overfit with the first blocks of dataset. It seems to broaden the scope of the model making it less strict for tight dataset.
|
62 |
+
|
63 |
+
### Targets
|
64 |
+
|
65 |
+
Normal LORA is q, v and that's what you should use. You can use (q k v o) or (q k v) and it will give you a lot more trainable parameters. The benefit is that you can keep rank lower and still attain the same coherency as q v with high rank. Guanaco has been trained with QLORA and q k v o for example and they swear by it.
|
66 |
+
|
67 |
+
### DEMENTOR LEARNING (experimental) Deep Memorization Enforcement Through Overlapping and Repetition
|
68 |
+
|
69 |
+
This is an experimental chunking method to train long-form text in a low number of epochs (basically 1) with sliding repetition. The depth of learning directly depends on the cutoff_length. Increasing the cutoff length will also increase the number of blocks created from long-form text (which is contrary to normal training). It is based on my own wild experiments.
|
70 |
+
|
71 |
+
### Getting rid of batch size and micro batch size
|
72 |
+
|
73 |
+
Keeping consistency with everyone else.
|
74 |
+
|
75 |
+
Listen, There is only ONE batch size - the True batch size (called previously micro-batch size in WebUI) - this is how many blocks are processed at once (during a single step). It eats GPU, but it really helps with the quality training (in fact the ideal batch size would be the same as number of blocks - which is unrealistic) - so the idea is to cram as much True Batch Size before your GPU blows with OOM. On 24GB this is about 10 for 13b (loaded with 4-bit)
|
76 |
+
|
77 |
+
So no micro batch size - it is now called True Batch Size, because that's what it is.
|
78 |
+
|
79 |
+
The other thing is Gradient Accumulation - this is an emulation of the above Batch size - a virtual batch size, if you will. If your GPU can't handle real batch size then you may fake it using Gradient Accumulation. This will accumulate the gradients over so many steps defined here and then update the weights at the end without increase in GPU.
|
80 |
+
Gradient accumulation is like a virtual Batch size multiplier without the GPU penalty.
|
81 |
+
|
82 |
+
If your batch size is 4 and your gradient accumulation is 2 then it sort of behaves as if we have batch size 8. *Sort of* because Batch size of 4 and GA of 2 is NOT the same as batch size of 2 and GA of 4. (It produces different weights - hence it's not an equivalent). The idea is that if you don't have GPU - using GA to extend batch size is the next best thing (good enough) since you have no other choice.
|
83 |
+
|
84 |
+
If all you can afford is 1 batch size, then increasing GA will likely make the learning better in some range of GA (it's not always more is better).
|
85 |
+
|
86 |
+
However - GA is not some golden goose. As said, it isn't the same as batch size. In fact GA may worsen your learning as well.
|
87 |
+
|
88 |
+
I would suggest a series of experiments where you put the batch size as high as possible without OOM, set GA to 1, then repeat training while increasing the GA (2, 4...), and see how the model changes. It's likely that it would follow some sort of curve where GA will seem to help before it makes things worse. Some people believe that if you can squeeze in a batch size of 6, then you should not bother with GA at all... YMMV
|
89 |
+
|
90 |
+
High Batch Size vs High GA would also likely produce different results in terms of learning words vs style. How? Hmmmm... good question.
|
91 |
+
|
92 |
+
One optical "benefit" of GA is that the loss will fluctuate less (because of all the gradient accumulation, which works as a form of noise smoothing as well).
|
extensions/Training_PRO/custom_scheduler.py
ADDED
@@ -0,0 +1,433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from functools import partial
|
2 |
+
import torch
|
3 |
+
import transformers
|
4 |
+
import math
|
5 |
+
from torch.optim.lr_scheduler import LambdaLR
|
6 |
+
|
7 |
+
from peft import (
|
8 |
+
PeftModel,
|
9 |
+
)
|
10 |
+
|
11 |
+
RED = "\033[91m"
|
12 |
+
YELLOW = "\033[93m"
|
13 |
+
GREEN = "\033[92m"
|
14 |
+
RESET = "\033[0m"
|
15 |
+
|
16 |
+
last_print_label = ''
|
17 |
+
|
18 |
+
custom_scheduler_params = {'trigger_loss': 0.0, 'ramp_down_ratio':1.0, 'current_loss': 0.0,'dynamic_scheduler_stop': False, 'calc_ramp_down_at_step': 0, 'calc_num_training_steps': 0}
|
19 |
+
|
20 |
+
|
21 |
+
def custom_scheduler_global_update(current_loss: float):
|
22 |
+
custom_scheduler_params.update({'current_loss': current_loss})
|
23 |
+
|
24 |
+
def custom_scheduler_global_setup(trigger_loss: float, ramp_down_ratio: float):
|
25 |
+
custom_scheduler_params.update({'trigger_loss': trigger_loss})
|
26 |
+
custom_scheduler_params.update({'ramp_down_ratio': ramp_down_ratio})
|
27 |
+
|
28 |
+
# calculates the total num steps after trigger
|
29 |
+
custom_scheduler_params.update({'calc_num_training_steps': 0})
|
30 |
+
#calculates steps when the ramp_down trigger occured
|
31 |
+
custom_scheduler_params.update({'calc_ramp_down_at_step': 0})
|
32 |
+
# triggers scheduler stopping after it reached calc_num_training_steps
|
33 |
+
custom_scheduler_params.update({'dynamic_scheduler_stop': False})
|
34 |
+
|
35 |
+
|
36 |
+
# hold constant to the half of epochs then cosine down to 0
|
37 |
+
def _get_fp_half_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
|
38 |
+
|
39 |
+
global last_print_label
|
40 |
+
print_label = ''
|
41 |
+
|
42 |
+
half_steps = num_training_steps//2
|
43 |
+
|
44 |
+
num_warmup_steps = min(num_warmup_steps,half_steps)
|
45 |
+
|
46 |
+
if current_step < num_warmup_steps:
|
47 |
+
print_label = 'Scheduler: Warmup'
|
48 |
+
elif current_step < half_steps:
|
49 |
+
print_label = 'Scheduler: Hold'
|
50 |
+
else:
|
51 |
+
print_label = 'Scheduler: Annealing'
|
52 |
+
|
53 |
+
if print_label != last_print_label:
|
54 |
+
print(print_label)
|
55 |
+
|
56 |
+
last_print_label = print_label
|
57 |
+
|
58 |
+
if current_step < num_warmup_steps:
|
59 |
+
return float(current_step) / float(max(1, num_warmup_steps))
|
60 |
+
|
61 |
+
if current_step < half_steps:
|
62 |
+
return 1.0
|
63 |
+
|
64 |
+
progress = float(current_step - half_steps) / float(max(1, num_training_steps - half_steps))
|
65 |
+
num_cycles = 0.5
|
66 |
+
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
|
67 |
+
|
68 |
+
|
69 |
+
# raise up in cosine, then fall back in cosine
|
70 |
+
def _get_fp_cosine_raise_and_fall_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
|
71 |
+
|
72 |
+
global last_print_label
|
73 |
+
print_label = ''
|
74 |
+
|
75 |
+
half_steps = num_training_steps//2
|
76 |
+
|
77 |
+
#num_warmup_steps = min(num_warmup_steps,half_steps)
|
78 |
+
|
79 |
+
if current_step < half_steps:
|
80 |
+
print_label = 'Scheduler: Raise'
|
81 |
+
else:
|
82 |
+
print_label = 'Scheduler: Fall'
|
83 |
+
|
84 |
+
if print_label != last_print_label:
|
85 |
+
print(print_label)
|
86 |
+
|
87 |
+
last_print_label = print_label
|
88 |
+
|
89 |
+
|
90 |
+
# linear
|
91 |
+
# return float(current_step) / float(max(1, num_warmup_steps))
|
92 |
+
|
93 |
+
progress = float(current_step - half_steps) / float(max(1, num_training_steps - half_steps))
|
94 |
+
num_cycles = 0.5
|
95 |
+
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
|
96 |
+
|
97 |
+
# constant to the first epochs then cosine down to 0 over the rest epochs
|
98 |
+
def _get_fp_cosine_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
|
99 |
+
|
100 |
+
global last_print_label
|
101 |
+
print_label = ''
|
102 |
+
|
103 |
+
num_warmup_steps = min(num_warmup_steps,num_firstepoch_steps)
|
104 |
+
|
105 |
+
if current_step < num_warmup_steps:
|
106 |
+
print_label = 'Scheduler: Warmup'
|
107 |
+
elif current_step < num_firstepoch_steps:
|
108 |
+
print_label = 'Scheduler: Hold'
|
109 |
+
else:
|
110 |
+
print_label = 'Scheduler: Annealing'
|
111 |
+
|
112 |
+
if print_label != last_print_label:
|
113 |
+
print(print_label)
|
114 |
+
|
115 |
+
last_print_label = print_label
|
116 |
+
|
117 |
+
if current_step < num_warmup_steps:
|
118 |
+
return float(current_step) / float(max(1, num_warmup_steps))
|
119 |
+
|
120 |
+
if current_step < num_firstepoch_steps:
|
121 |
+
return 1.0
|
122 |
+
|
123 |
+
progress = float(current_step - num_firstepoch_steps) / float(max(1, num_training_steps - num_firstepoch_steps))
|
124 |
+
num_cycles = 0.5
|
125 |
+
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
|
126 |
+
|
127 |
+
# halve lr each epoch
|
128 |
+
|
129 |
+
def _get_fp_cdrop_rate_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_firstepoch_steps: int):
|
130 |
+
|
131 |
+
global last_print_label
|
132 |
+
print_label = ''
|
133 |
+
|
134 |
+
num_warmup_steps = min(num_warmup_steps, num_firstepoch_steps)
|
135 |
+
|
136 |
+
current_epoch = (current_step // num_firstepoch_steps) + 1
|
137 |
+
|
138 |
+
|
139 |
+
if current_step < num_warmup_steps:
|
140 |
+
print_label = 'Scheduler: Warmup'
|
141 |
+
elif current_step < num_firstepoch_steps:
|
142 |
+
print_label = 'Scheduler: Hold'
|
143 |
+
else:
|
144 |
+
print_label = 'Scheduler: Drop Rate'
|
145 |
+
|
146 |
+
if print_label != last_print_label:
|
147 |
+
print(print_label)
|
148 |
+
|
149 |
+
last_print_label = print_label
|
150 |
+
|
151 |
+
if current_step < num_warmup_steps:
|
152 |
+
return float(current_step) / float(max(1, num_warmup_steps))
|
153 |
+
|
154 |
+
if current_step < num_firstepoch_steps:
|
155 |
+
return 1.0
|
156 |
+
|
157 |
+
# Compute the learning rate for the annealing phase
|
158 |
+
|
159 |
+
learning_rate = 1.0 / float(2 ** (current_epoch - 1))
|
160 |
+
|
161 |
+
return learning_rate
|
162 |
+
|
163 |
+
# epoch decay: 1/(1 + decay * epoch)
|
164 |
+
|
165 |
+
def _fp_lambda_scheduler(schedule_fn, optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch):
    """Shared builder: bind an FP schedule lambda's keyword args and wrap it in a LambdaLR.

    Extracted because the three public builders below were identical except
    for the schedule function they bound.
    """
    lr_lambda = partial(
        schedule_fn,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_firstepoch_steps=num_firstepoch_steps,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def custom_cosine_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
    """Warmup, hold through the first epoch, then cosine-anneal to 0.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_firstepoch_steps (`int`):
            Number of steps in the first epoch (end of the hold phase).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return _fp_lambda_scheduler(_get_fp_cosine_schedule_with_warmup_lr_lambda, optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch)


def custom_half_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
    """Warmup, hold until half of all steps, then cosine-anneal to 0.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_firstepoch_steps (`int`):
            Number of steps in the first epoch (unused by this schedule).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return _fp_lambda_scheduler(_get_fp_half_schedule_with_warmup_lr_lambda, optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch)


def custom_raise_fall_scheduler_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch=-1):
    """Cosine rise to the midpoint, then cosine fall to 0 (warmup has no effect).

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase (ignored by this schedule).
        num_training_steps (`int`):
            The total number of training steps.
        num_firstepoch_steps (`int`):
            Number of steps in the first epoch (unused by this schedule).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return _fp_lambda_scheduler(_get_fp_cosine_raise_and_fall_lr_lambda, optimizer, num_warmup_steps, num_training_steps, num_firstepoch_steps, last_epoch)
|
236 |
+
|
237 |
+
|
238 |
+
def neftune_forward(self, input: torch.Tensor):
    """
    NEFTune forward pass, intended to replace a ``torch.nn.Embedding.forward``.
    Slightly adapted from the original source: https://github.com/neelsjain/NEFTune

    In training mode, uniform noise with magnitude
    ``self.neftune_noise_alpha / sqrt(dim1 * dim2)`` of the embedding output
    is added; in eval mode the embeddings pass through unchanged.

    Args:
        input (`torch.Tensor`):
            The input tensor to the model.
    """
    embedded = torch.nn.functional.embedding(
        input, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse
    )

    if not self.training:
        return embedded

    # Noise magnitude per NEFTune: alpha / sqrt(seq_len * hidden_dim).
    flat_dims = torch.tensor(embedded.size(1) * embedded.size(2))
    magnitude = self.neftune_noise_alpha / torch.sqrt(flat_dims)
    noise = torch.zeros_like(embedded).uniform_(-magnitude, magnitude)
    return embedded + noise
|
261 |
+
|
262 |
+
|
263 |
+
class FPNEFtuneTrainer(transformers.Trainer):
    """``transformers.Trainer`` subclass that optionally applies NEFTune noise.

    When ``neftune_noise_alpha > 0``, the model's input-embedding ``forward``
    is replaced with :func:`neftune_forward` before the base Trainer is set
    up, and the original forward is restored after ``train()`` finishes.
    """

    def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):
        # Store alpha first: _activate_neftune reads it while patching the model.
        self.neftune_noise_alpha = neftune_noise_alpha
        if self.neftune_noise_alpha > 0.0:
            # Patch before super().__init__ so the base Trainer wraps the patched model.
            model = self._activate_neftune(model)
        super().__init__(model = model, *args, **kwargs)


    def _activate_neftune(self, model):
        r"""
        Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914
        """
        print(f"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}")
        if isinstance(model, transformers.PreTrainedModel):
            embeddings = model.get_input_embeddings()
        elif isinstance(model, PeftModel):
            # PEFT wraps the base model; fetch the embeddings from the wrapped model.
            embeddings = model.base_model.get_input_embeddings()
        # NOTE(review): if `model` is neither type, `embeddings` is unbound and the
        # next line raises NameError — confirm callers only pass these two types.

        embeddings.neftune_noise_alpha = self.neftune_noise_alpha
        old_forward = embeddings.forward

        # This hack seems to be needed to properly use a custom forward pass
        # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11
        bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)
        setattr(embeddings, "forward", bound_method)

        # embeddings.forward = neftune_forward
        # Keep the original forward so train() can restore it afterwards.
        embeddings._trl_old_forward = old_forward

        return model

    def train(self, *args, **kwargs):
        """Run ``Trainer.train()``, then undo the NEFTune embedding patch (if any)."""
        output = super().train(*args, **kwargs)

        # After training we make sure to retrieve back the original forward pass method
        # for the embedding layer
        # NOTE(review): alpha defaults to 0.0 and is never None, so this branch always
        # runs; it is harmless because the hasattr() guard only restores when patched.
        if self.neftune_noise_alpha is not None:

            if isinstance(self.model, transformers.PreTrainedModel):
                embeddings = self.model.get_input_embeddings()
            elif isinstance(self.model, PeftModel):
                embeddings = self.model.base_model.get_input_embeddings()

            if hasattr(embeddings, "_trl_old_forward"):
                embeddings.forward = embeddings._trl_old_forward
                del embeddings._trl_old_forward
                del embeddings.neftune_noise_alpha

        return output
|
312 |
+
|
313 |
+
|
314 |
+
class FPSchedulerTrainer(transformers.Trainer):
    """``transformers.Trainer`` subclass with custom FP LR schedules and optional NEFTune.

    ``create_scheduler`` is overridden so that selected stock scheduler names
    are mapped to the FP schedules defined in this module (see the method for
    the exact mapping). NEFTune patching mirrors :class:`FPNEFtuneTrainer`.
    """

    def __init__(self,neftune_noise_alpha:float = 0.0, model = None, *args, **kwargs):
        # Store alpha first: _activate_neftune reads it while patching the model.
        self.neftune_noise_alpha = neftune_noise_alpha
        if self.neftune_noise_alpha > 0.0:
            # Patch before super().__init__ so the base Trainer wraps the patched model.
            model = self._activate_neftune(model)
        super().__init__(model = model, *args, **kwargs)


    def _activate_neftune(self, model):
        r"""
        Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914
        """
        print(f"Activating {RED}NEFtune{RESET} with scale: {self.neftune_noise_alpha}")
        if isinstance(model, transformers.PreTrainedModel):
            embeddings = model.get_input_embeddings()
        elif isinstance(model, PeftModel):
            # PEFT wraps the base model; fetch the embeddings from the wrapped model.
            embeddings = model.base_model.get_input_embeddings()
        # NOTE(review): if `model` is neither type, `embeddings` is unbound and the
        # next line raises NameError — confirm callers only pass these two types.

        embeddings.neftune_noise_alpha = self.neftune_noise_alpha
        old_forward = embeddings.forward

        # This hack seems to be needed to properly use a custom forward pass
        # all credits to: https://discuss.pytorch.org/t/how-can-i-replace-the-forward-method-of-a-predefined-torchvision-model-with-my-customized-forward-function/54224/11
        bound_method = neftune_forward.__get__(embeddings, embeddings.__class__)
        setattr(embeddings, "forward", bound_method)

        # embeddings.forward = neftune_forward
        # Keep the original forward so train() can restore it afterwards.
        embeddings._trl_old_forward = old_forward

        return model

    def train(self, *args, **kwargs):
        """Run ``Trainer.train()``, then undo the NEFTune embedding patch (if any)."""
        output = super().train(*args, **kwargs)

        # After training we make sure to retrieve back the original forward pass method
        # for the embedding layer
        # NOTE(review): alpha defaults to 0.0 and is never None, so this branch always
        # runs; it is harmless because the hasattr() guard only restores when patched.
        if self.neftune_noise_alpha is not None:

            if isinstance(self.model, transformers.PreTrainedModel):
                embeddings = self.model.get_input_embeddings()
            elif isinstance(self.model, PeftModel):
                embeddings = self.model.base_model.get_input_embeddings()

            if hasattr(embeddings, "_trl_old_forward"):
                embeddings.forward = embeddings._trl_old_forward
                del embeddings._trl_old_forward
                del embeddings.neftune_noise_alpha

        return output


    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        #Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or passed as an argument.
        #
        # Mapping used here (stock scheduler names are repurposed to select the
        # FP schedules — presumably chosen by the extension's UI; confirm there):
        #   'cosine'               -> custom_cosine_scheduler_with_warmup (hold first epoch, then anneal)
        #   'constant'             -> custom_half_scheduler_with_warmup (hold first half, then anneal)
        #   'constant_with_warmup' -> custom_raise_fall_scheduler_with_warmup (cosine raise then fall)
        #   anything else          -> default transformers.Trainer.create_scheduler

        num_train_epochs = self.args.num_train_epochs
        num_warmup_steps=self.args.get_warmup_steps(num_training_steps)
        # Steps in the first epoch (optimizer steps, i.e. after gradient accumulation).
        num_firstepoch_steps = math.ceil(num_training_steps/num_train_epochs)
        # "_acc" values are re-scaled to raw (per-batch) steps purely for display.
        num_warmup_acc = num_warmup_steps*self.args.gradient_accumulation_steps
        num_firstepoch_steps_acc = num_firstepoch_steps*self.args.gradient_accumulation_steps
        num_training_steps_acc = num_training_steps*self.args.gradient_accumulation_steps

        # Reset the shared dynamic-scheduler stop flag for this run.
        custom_scheduler_params.update({'dynamic_scheduler_stop': False})

        print (f"Warm-up steps aligned to Gradient accumulation ({self.args.gradient_accumulation_steps}) = {num_warmup_acc} actual warmup steps")
        if self.args.lr_scheduler_type == 'cosine':

            # Warmup is clamped to the first epoch (matches the schedule lambda).
            num_warmup_acc_min = min(num_warmup_acc, num_firstepoch_steps_acc)

            if num_warmup_acc>num_firstepoch_steps_acc:
                print(f"\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to 1 epoch, essentially going from warmup to annealing.\033[0;37;0m")
                print (f"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}")
            else:
                print (f"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{num_firstepoch_steps_acc}, Annealing {num_firstepoch_steps_acc}-{num_training_steps_acc}")

            self.lr_scheduler = custom_cosine_scheduler_with_warmup(
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=num_warmup_steps,
                num_training_steps=num_training_steps,
                num_firstepoch_steps = num_firstepoch_steps,
            )
            self._created_lr_scheduler = True
            return self.lr_scheduler
        elif self.args.lr_scheduler_type == 'constant':

            # Hold phase ends at the midpoint of training for this schedule.
            half_step_acc = num_training_steps_acc//2
            num_warmup_acc_min = min(num_warmup_acc, half_step_acc)

            if num_warmup_acc>half_step_acc:
                print(f"\033[1;31;1mWARNING: The number of warmup steps is set too high! It will be clamped to half of all epochs, essentially going from warmup to annealing in the middle.\033[0;37;0m")
                print (f"FP Scheduler Warmup: 0-[{num_warmup_acc_min}], Hold [{num_warmup_acc_min}]-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}")
            else:
                print (f"FP Scheduler Warmup: 0-{num_warmup_acc_min}, Hold {num_warmup_acc_min}-{half_step_acc}, Annealing {half_step_acc}-{num_training_steps_acc}")

            self.lr_scheduler = custom_half_scheduler_with_warmup(
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=num_warmup_steps,
                num_training_steps=num_training_steps,
                num_firstepoch_steps = num_firstepoch_steps,
            )
            self._created_lr_scheduler = True
            return self.lr_scheduler
        elif self.args.lr_scheduler_type == 'constant_with_warmup':

            half_step_acc = num_training_steps_acc//2

            # The raise/fall schedule ignores warmup (its rising half is the warmup).
            if num_warmup_steps>0:
                print(f"Warmup doesn't apply to this scheduler [Raise-Fall]")

            print (f"Scheduler Raise: 0-{half_step_acc}, Fall {half_step_acc}-{num_training_steps_acc}")

            self.lr_scheduler = custom_raise_fall_scheduler_with_warmup(
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=num_warmup_steps,
                num_training_steps=num_training_steps,
                num_firstepoch_steps = num_firstepoch_steps,
            )
            self._created_lr_scheduler = True
            return self.lr_scheduler
        else:
            # Any other scheduler name: defer to the stock transformers implementation.
            return super().create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
|
extensions/Training_PRO/matplotgraph.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
|
4 |
+
def create_graph(lora_path, lora_name):
    """Render ``{lora_path}/training_graph.json`` as a dual-axis PNG.

    Plots learning rate (left axis, blue) and loss (right axis, red) against
    epoch, and saves the chart to ``{lora_path}/training_graph.png``. Prints
    a message and returns without error when the JSON file or matplotlib is
    missing, so training never fails because of graphing.

    Args:
        lora_path: Folder containing ``training_graph.json``.
        lora_name: Name used in the chart title.
    """
    try:
        import matplotlib.pyplot as plt
        from matplotlib.ticker import ScalarFormatter

        peft_model_path = f'{lora_path}/training_graph.json'
        image_model_path = f'{lora_path}/training_graph.png'
        # Check if the JSON file exists
        if os.path.exists(peft_model_path):
            # Load data from JSON file
            with open(peft_model_path, 'r') as file:
                data = json.load(file)

            # Extract x, y1, and y2 values
            x = [item['epoch'] for item in data]
            y1 = [item['learning_rate'] for item in data]
            y2 = [item['loss'] for item in data]

            # Create the line chart
            fig, ax1 = plt.subplots(figsize=(10, 6))

            # Plot y1 (learning rate) on the first y-axis
            ax1.plot(x, y1, 'b-', label='Learning Rate')
            ax1.set_xlabel('Epoch')
            ax1.set_ylabel('Learning Rate', color='b')
            ax1.tick_params('y', colors='b')

            # Create a second y-axis for the loss
            ax2 = ax1.twinx()
            ax2.plot(x, y2, 'r-', label='Loss')
            ax2.set_ylabel('Loss', color='r')
            ax2.tick_params('y', colors='r')

            # Display learning-rate ticks in scientific notation (values are tiny)
            ax1.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
            ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))

            ax1.grid(True)

            # Combine the legends for both plots
            lines, labels = ax1.get_legend_handles_labels()
            lines2, labels2 = ax2.get_legend_handles_labels()
            ax2.legend(lines + lines2, labels + labels2, loc='best')

            plt.title(f'{lora_name} LR and Loss vs Epoch')

            # Save the chart as an image
            plt.savefig(image_model_path)

            # Close the figure: without this, repeated calls (one per save)
            # accumulate open figures and leak memory.
            plt.close(fig)

            print(f"Graph saved in {image_model_path}")
        else:
            print(f"File 'training_graph.json' does not exist in the {lora_path}")

    except ImportError:
        print("matplotlib is not installed. Please install matplotlib to create PNG graphs")
|
extensions/Training_PRO/script.py
ADDED
@@ -0,0 +1,1308 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
os.environ["WANDB_MODE"] = "offline"
|
4 |
+
# os.environ["WANDB_DISABLED"] = "true"
|
5 |
+
|
6 |
+
import json
|
7 |
+
import math
|
8 |
+
import random
|
9 |
+
import shutil
|
10 |
+
import sys
|
11 |
+
import threading
|
12 |
+
import time
|
13 |
+
import traceback
|
14 |
+
from datetime import datetime
|
15 |
+
from pathlib import Path
|
16 |
+
|
17 |
+
import gradio as gr
|
18 |
+
import pandas as pd
|
19 |
+
import torch
|
20 |
+
import transformers
|
21 |
+
|
22 |
+
from functools import partial
|
23 |
+
|
24 |
+
from .custom_scheduler import FPSchedulerTrainer, FPNEFtuneTrainer
|
25 |
+
|
26 |
+
from .matplotgraph import create_graph
|
27 |
+
from .train_utils import get_available_loras_local, precise_cut, sliding_block_cut, download_file_from_url
|
28 |
+
|
29 |
+
from datasets import Dataset, load_dataset
|
30 |
+
from peft import (
|
31 |
+
LoraConfig,
|
32 |
+
get_peft_model,
|
33 |
+
prepare_model_for_kbit_training,
|
34 |
+
set_peft_model_state_dict
|
35 |
+
)
|
36 |
+
from peft.utils.other import \
|
37 |
+
TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING as model_to_lora_modules
|
38 |
+
from transformers.models.auto.modeling_auto import (
|
39 |
+
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
|
40 |
+
)
|
41 |
+
|
42 |
+
from modules import shared, utils
|
43 |
+
from modules.ui import create_refresh_button
|
44 |
+
|
45 |
+
from modules.evaluate import (
|
46 |
+
calculate_perplexity,
|
47 |
+
generate_markdown_table,
|
48 |
+
save_past_evaluations
|
49 |
+
)
|
50 |
+
from modules.logging_colors import logger
|
51 |
+
from modules.models import reload_model
|
52 |
+
from modules.utils import natural_keys
|
53 |
+
|
54 |
+
import warnings
# Silence two warnings that otherwise fire on every training run:
# torch's gradient-checkpointing notice and transformers' complaint about
# `do_sample=False` in generation configs.
warnings.filterwarnings(action = "ignore", message="torch.utils.checkpoint:")
warnings.filterwarnings(action = "ignore", message="`do_sample` is set to `False`")

# Extension metadata (display name and tab flag) — presumably consumed by the
# text-generation-webui extension loader; confirm against the loader code.
params = {
    "display_name": "Training PRO",
    "is_tab": True
}

# Mutable runtime state shared between UI callbacks and the training loop.
# Deliberately separate from the serialized training parameters (PARAMETERS)
# so it is never written to / restored from saved LoRA settings.
non_serialized_params = {
    "debug_slicer": False,           # when True, slicers dump their block list for inspection
    "Lora_sortedByTime": False,      # sort the LoRA dropdown by creation date instead of name
    "stop_at_loss": 0,               # live-updatable loss threshold; 0 disables auto-stop
    "save_steps_under_loss": 0.0,    # loss level below which checkpoints are saved
    "save_checkpoint_now": False,    # one-shot flag: queue a checkpoint at the next step
    "training_loop": False,          # True while a training run is in progress
    "current_stability": 0,
    "save_epochs": 0,
    "checkpoint_offset": 0,          # offsets used when continuing from an existing LoRA
    "epoch_offset":0,
    "safe_serialization": False,     # whether to save with safetensors serialization
}
|
76 |
+
|
77 |
+
# Invert the transformers mapping: .items() yields (model_type, class_name)
# pairs, so this maps class name -> model type key.
MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()}

# Names of the UI fields that are serialized when saving/loading training
# parameters (the "Copy parameters from" feature) — order matters and must
# match the `all_params` list built in ui().
PARAMETERS = ["lora_name", "always_override", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to", "precize_slicing_overlap", "add_eos_token_type", "save_steps_under_loss", "add_bos_token", "training_projection","sliding_window","warmup_ratio","grad_accumulation","neft_noise_alpha"]
# Global interrupt flag polled by the training loop.
WANT_INTERRUPT = False

# Per-run logging state (reset for each training session).
train_log = {}
train_template = {}
train_log_graph = []
# Preset choices for the "LLaMA Target Projections" radio in the UI.
train_choices = ["all","q-k-v-o","q-k-v","k-v-down","q-v"]

# Accumulated per-step metrics for graphing.
statistics = {
    'loss': [],
    'lr': [],
}

# ANSI escape codes for colored console output.
RED = "\033[91m"
YELLOW = "\033[93m"
GREEN = "\033[92m"
RESET = "\033[0m"
|
96 |
+
|
97 |
+
def ui():
|
98 |
+
|
99 |
+
with gr.Tab('Train LoRA', elem_id='lora-train-tab'):
|
100 |
+
tmp = gr.State('')
|
101 |
+
with gr.Row():
|
102 |
+
with gr.Column():
|
103 |
+
# YY.MM.DD
|
104 |
+
gr.Markdown("`Ver: 23.10.20 (REV2)` This is enhanced version of QLora Training. [Maintained by FP](https://github.com/FartyPants/Training_PRO/tree/main)")
|
105 |
+
|
106 |
+
with gr.Row():
|
107 |
+
with gr.Column(scale=5):
|
108 |
+
with gr.Row():
|
109 |
+
copy_from = gr.Dropdown(label='Copy parameters from', value='None', choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']), elem_classes=['slim-dropdown'])
|
110 |
+
create_refresh_button(copy_from, lambda: None, lambda: {'choices': get_available_loras_local(non_serialized_params['Lora_sortedByTime'])}, 'refresh-button')
|
111 |
+
with gr.Column():
|
112 |
+
sort_byTime = gr.Checkbox(label='Sort list by Date', value=False, info='Sorts Loras by date created.', elem_classes=['no-background'])
|
113 |
+
|
114 |
+
with gr.Row():
|
115 |
+
with gr.Column(scale=5):
|
116 |
+
lora_name = gr.Textbox(label='Name', info='The name of your new LoRA file')
|
117 |
+
|
118 |
+
with gr.Column():
|
119 |
+
always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])
|
120 |
+
|
121 |
+
with gr.Row():
|
122 |
+
with gr.Column():
|
123 |
+
lora_rank = gr.Slider(label='LoRA Rank', value=32, minimum=0, maximum=1024, step=4, info='Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.')
|
124 |
+
lora_alpha = gr.Slider(label='LoRA Alpha', value=64, minimum=0, maximum=2048, step=4, info='This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.')
|
125 |
+
batch_size = gr.Slider(visible= False, label='Batch Size', value=0, minimum=0, maximum=1024, step=4, info='Now Replaced with Gradient accumulation. Keeping it for sake of old saved data')
|
126 |
+
micro_batch_size = gr.Slider(label='True Batch Size', value=4, minimum=1, maximum=128, step=1, info='Specifies how many text blocks per step will be trained. The higher value, the better the concept of training will be, but it requires more GPU memory and it reduces speed.')
|
127 |
+
grad_accumulation = gr.Slider(label='Gradient Accumulation Steps', value=1, minimum=1, maximum=256, step=1, info="Virtually multiplies the Batch Size by averaging the learning over more than one step. VRAM friendly. Evens out loss fluctuations but can also degrade training fidelity.")
|
128 |
+
|
129 |
+
with gr.Column():
|
130 |
+
stop_at_loss = gr.Slider(label='Stop at loss (Can be changed during training)', minimum=0.0, maximum=3.0, step=0.1, value=0.00, info='The process will automatically stop once the desired loss value is reached.')
|
131 |
+
gr.Markdown(" ")
|
132 |
+
epochs = gr.Number(label='Epochs', value=3, info='Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.')
|
133 |
+
learning_rate = gr.Textbox(label='Learning Rate', value='3e-4', info='In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.')
|
134 |
+
lr_scheduler_type = gr.Dropdown(label='LR Scheduler', value='linear', choices=['linear', 'constant', 'constant_with_warmup', 'cosine', 'cosine_with_restarts', 'polynomial', 'inverse_sqrt', 'FP_low_epoch_annealing', 'FP_half_time_annealing','FP_raise_fall_creative'], info='Learning rate scheduler - defines how the learning rate changes over time. Custom schedulers: FP_low_epoch_annealing, FP_half_time_annealing, FP_raise_fall_creative (see README)', elem_classes=['slim-dropdown'])
|
135 |
+
|
136 |
+
with gr.Accordion(label='Checkpoints', open=True):
|
137 |
+
with gr.Row():
|
138 |
+
with gr.Column():
|
139 |
+
save_steps = gr.Number(label='Save every n steps', value=0, info='A checkpoint will be saved every n steps and at each Epoch boundary. (0 = OFF)')
|
140 |
+
with gr.Column():
|
141 |
+
save_steps_under_loss = gr.Slider(label='Save at 10% Loss change', value=1.8, minimum=0.0, maximum=3.0, step=0.1, info="Saves checkpoints at (or bellow) this loss and then each time loss falls by at least 10% This works independently from 'Save every n steps'")
|
142 |
+
with gr.Row():
|
143 |
+
save_chackpoint_now = gr.Button('Queue Checkpoint Now')
|
144 |
+
|
145 |
+
with gr.Accordion(label='Advanced Options', open=True):
|
146 |
+
with gr.Row():
|
147 |
+
with gr.Column():
|
148 |
+
warmup_steps = gr.Number(label='Warmup Steps', value=100, info='Number of max steps used for a linear warmup. Reduces early over-fitting by the first training blocks. Value has precedent over Warmup Ratio. Aligns to the closest multiple of graddient accumulation')
|
149 |
+
warmup_ratio = gr.Slider(label='Warmup Ratio', minimum=0.0, maximum=0.2, step=0.025, value=0.0, info='Ratio of total training steps that will be used for a linear warmup. It applies only if Warmup Step is 0.')
|
150 |
+
neft_noise_alpha = gr.Slider(label='NEFtune noise scale', minimum=0.0, maximum=15, step=1, value=0.0, info='Add noise to the training to improve generalization. [0 - OFF, Starting value to experiment: 5]')
|
151 |
+
training_projection = gr.Radio(value = train_choices[4], label='LLaMA Target Projections', info='Change the targets (LORA is typically q-v)', choices=train_choices)
|
152 |
+
lora_dropout = gr.Slider(label='LoRA Dropout', minimum=0.0, maximum=1.0, step=0.025, value=0.05, info='Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.')
|
153 |
+
optimizer = gr.Dropdown(label='Optimizer', value='adamw_torch', choices=['adamw_hf', 'adamw_torch', 'adamw_torch_fused', 'adamw_torch_xla', 'adamw_apex_fused', 'adafactor', 'adamw_bnb_8bit', 'adamw_anyprecision', 'sgd', 'adagrad'], info='Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.', elem_classes=['slim-dropdown'])
|
154 |
+
|
155 |
+
with gr.Column():
|
156 |
+
train_only_after = gr.Textbox(label='Train Only After', value='', info='Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use "### Response:" to only train the response and ignore the input.')
|
157 |
+
add_bos_token = gr.Checkbox(label='Add BOS token', value=True, info="Adds BOS token for each dataset item")
|
158 |
+
add_eos_token = gr.Checkbox(label='Add EOS token', value=False, info="Adds EOS token for each dataset item")
|
159 |
+
add_eos_token_type = gr.Dropdown(label='EOS placement (Text file)', choices=['Every Block', 'Hard Cut Blocks Only'], value='Every Block', info='', allow_custom_value = False)
|
160 |
+
|
161 |
+
higher_rank_limit = gr.Checkbox(label='Enable higher ranks', value=False, info='If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.')
|
162 |
+
report_to = gr.Radio(label="Save detailed logs with", value="None", choices=["None", "wandb", "tensorboard"], interactive=True)
|
163 |
+
# for future
|
164 |
+
#with gr.Accordion(label='Dynamic Scheduler', open = False):
|
165 |
+
# ds_min_epochs = gr.Number(label='Minimum Epochs', value='1', info='Minimum epochs that will be always performed before ramp down can be triggered')
|
166 |
+
# ds_max_epochs = gr.Number(label='Maximum Epochs (fallback)', value='50', info='Maximum Epochs before the training will bail out completely (should be a large number)')
|
167 |
+
# ds_loss_trigger = gr.Slider(label='Trigger Loss', minimum=0.0, maximum=2.8, step=0.1, value=1.6, info='Loss at which the ramp down schedule will be triggered')
|
168 |
+
# ds_loss_rolling_window = gr.Number(label='Loss rolling average', value='4', info='Calculate loss by averaging last x numbers to avoid jumps and noise')
|
169 |
+
# ds_epochs_to_ramp = gr.Slider(label='Ramp down ratio', minimum=0.0, maximum=2.0, step=0.1, value=1.00, info='How long the ramp down will last relative to ellapsed steps (before trigger)')
|
170 |
+
# gr.Markdown('These are settings for FP_dynamic_loss_trigger scheduler. The scheduler will do warm up, then hold constant untill a loss falls under Trigger Loss, then it will commence linear ramp down schedule and stop. The length of ramp down is set by Ramp down ratio where (ramp down steps) = ratio * (elapsed steps). (The time to completition shown will be very high untill ramp down is triggered.)')
|
171 |
+
|
172 |
+
|
173 |
+
with gr.Column():
|
174 |
+
with gr.Tab(label='Formatted Dataset'):
|
175 |
+
with gr.Row():
|
176 |
+
with gr.Column():
|
177 |
+
with gr.Row():
|
178 |
+
dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Dataset', info='The dataset file to use for training.', elem_classes=['slim-dropdown'])
|
179 |
+
create_refresh_button(dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
|
180 |
+
with gr.Row():
|
181 |
+
eval_dataset = gr.Dropdown(choices=get_datasets('training/datasets', 'json'), value='None', label='Evaluation Dataset', info='The (optional) dataset file used to evaluate the model after training.', elem_classes=['slim-dropdown'])
|
182 |
+
create_refresh_button(eval_dataset, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'json')}, 'refresh-button')
|
183 |
+
|
184 |
+
with gr.Column():
|
185 |
+
with gr.Row():
|
186 |
+
format = gr.Dropdown(choices=get_datasets('training/formats', 'json'), value='None', label='Data Format', info='The format file used to decide how to format the dataset input.', elem_classes=['slim-dropdown'])
|
187 |
+
create_refresh_button(format, lambda: None, lambda: {'choices': get_datasets('training/formats', 'json')}, 'refresh-button')
|
188 |
+
with gr.Row():
|
189 |
+
eval_steps = gr.Number(label='Evaluate every n steps', value=100, info='If an evaluation dataset is given, test it every time this many steps pass.')
|
190 |
+
|
191 |
+
with gr.Tab(label="Text file"):
|
192 |
+
with gr.Row():
|
193 |
+
raw_text_file = gr.Dropdown(choices=get_datasets('training/datasets', 'txt'), value='None', label='Text file', info='The text file to use for training.', elem_classes=['slim-dropdown'])
|
194 |
+
create_refresh_button(raw_text_file, lambda: None, lambda: {'choices': get_datasets('training/datasets', 'txt')}, 'refresh-button')
|
195 |
+
|
196 |
+
with gr.Row():
|
197 |
+
with gr.Column():
|
198 |
+
precize_slicing_overlap = gr.Checkbox(label='Add Overlapping blocks', value = True)
|
199 |
+
sliding_window = gr.Checkbox(label='DEMENTOR Long-form Learning by FP (Highly Experimental, use low epochs)', value = False, info='Deep Memorization Enforcement Through Overlapping and Repetition. (I named it, so shush). Special process for learning long-form text using low amount of epochs.')
|
200 |
+
#debug_slicer = gr.Checkbox(label='Dump sentencelist.json to logs', value = non_serialized_params['debug_slicer'], info='Debug Slicer')
|
201 |
+
|
202 |
+
with gr.Column():
|
203 |
+
hard_cut_string = gr.Textbox(label='Hard Cut String', value='\\n\\n\\n', info='String that indicates a cut between logical blocks of text (ex. Ideas or Chapters). Helps prevent unwanted overlap between unrelated ideas.')
|
204 |
+
min_chars = gr.Number(label='Ignore small blocks', value=0, info='Ignore Text blocks that have less or equal characters than this number.')
|
205 |
+
with gr.Tab(label="URL"):
|
206 |
+
with gr.Row():
|
207 |
+
with gr.Column():
|
208 |
+
download_file_url = gr.Textbox(label='Download JSON or txt file to datasets (or formats) folder', value='',info='The URL of a file to download. If on github, make sure you get url of the raw file (https://raw.githubusercontent.com/...). If huggin face, make sure the url has /resolve/ in it not /blob/')
|
209 |
+
with gr.Row():
|
210 |
+
download_check_overwrite = gr.Checkbox(label='Overwrite', value=False, info='Overwrite if file exist')
|
211 |
+
download_folder = gr.Radio(label="Destination", value='training/datasets', choices=['training/datasets', 'training/formats'], interactive=True)
|
212 |
+
download_button = gr.Button('Download')
|
213 |
+
download_status = gr.Textbox(label='Download Status', value='', interactive=False)
|
214 |
+
with gr.Row():
|
215 |
+
with gr.Column():
|
216 |
+
with gr.Row():
|
217 |
+
cutoff_len = gr.Slider(label='Chunk Length (Cutoff Length)', minimum=32, maximum=2048, value=256, step=32, info='The maximum length of a chunk (in tokens). Applies to both JSON dataset and text files. Higher values require much more VRAM.')
|
218 |
+
with gr.Row():
|
219 |
+
with gr.Column():
|
220 |
+
check_dataset_btn = gr.Button('Verify Dataset/Text File and suggest data entries')
|
221 |
+
check_dataset_txt = gr.Textbox(label='Dataset info', value='')
|
222 |
+
|
223 |
+
with gr.Row():
|
224 |
+
start_button = gr.Button("Start LoRA Training", variant='primary')
|
225 |
+
stop_button = gr.Button("Interrupt")
|
226 |
+
|
227 |
+
with gr.Accordion(label="Graph", open=True):
|
228 |
+
with gr.Row():
|
229 |
+
# show_actions_button = False - we use old gradio
|
230 |
+
plot_graph = gr.LinePlot(x="epoch", y="value", title="Loss Metrics", overlay_point=True, tooltip=["epoch", "value"], x_lim=[0, 1], y_lim=[0, 3.5], width=500, height=250)
|
231 |
+
|
232 |
+
output = gr.Markdown(value="Ready")
|
233 |
+
|
234 |
+
with gr.Tab('Perplexity evaluation', elem_id='evaluate-tab'):
|
235 |
+
with gr.Row():
|
236 |
+
with gr.Column():
|
237 |
+
models = gr.Dropdown(utils.get_available_models(), label='Models', multiselect=True)
|
238 |
+
evaluate_text_file = gr.Dropdown(choices=['wikitext', 'ptb', 'ptb_new'] + get_datasets('training/datasets', 'txt')[1:], value='wikitext', label='Input dataset', info='The text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.')
|
239 |
+
with gr.Row():
|
240 |
+
with gr.Column():
|
241 |
+
stride_length = gr.Slider(label='Stride', minimum=1, maximum=2048, value=512, step=1, info='Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.')
|
242 |
+
|
243 |
+
with gr.Column():
|
244 |
+
max_length = gr.Slider(label='max_length', minimum=0, maximum=shared.settings['truncation_length_max'], value=0, step=1, info='The context for each evaluation. If set to 0, the maximum context length for the model will be used.')
|
245 |
+
|
246 |
+
with gr.Row():
|
247 |
+
start_current_evaluation = gr.Button("Evaluate loaded model")
|
248 |
+
start_evaluation = gr.Button("Evaluate selected models")
|
249 |
+
stop_evaluation = gr.Button("Interrupt")
|
250 |
+
|
251 |
+
with gr.Column():
|
252 |
+
evaluation_log = gr.Markdown(value='')
|
253 |
+
|
254 |
+
evaluation_table = gr.Dataframe(value=generate_markdown_table(), interactive=True)
|
255 |
+
with gr.Row():
|
256 |
+
save_comments = gr.Button('Save comments', elem_classes="small-button")
|
257 |
+
refresh_table = gr.Button('Refresh the table', elem_classes="small-button")
|
258 |
+
|
259 |
+
# Training events
|
260 |
+
all_params = [lora_name, always_override, save_steps, micro_batch_size, batch_size, epochs, learning_rate, lr_scheduler_type, lora_rank, lora_alpha, lora_dropout, cutoff_len, dataset, eval_dataset, format, eval_steps, raw_text_file, higher_rank_limit, warmup_steps, optimizer, hard_cut_string, train_only_after, stop_at_loss, add_eos_token, min_chars, report_to, precize_slicing_overlap, add_eos_token_type, save_steps_under_loss, add_bos_token, training_projection,sliding_window,warmup_ratio,grad_accumulation, neft_noise_alpha]
|
261 |
+
|
262 |
+
def fix_old_version(batch_size_val, micro_batch_size_val, grad_accumulation_val):
    """Migrate legacy saved parameters to the new Gradient Accumulation field.

    Older saves stored a 'Batch Size' > 0; the current UI derives gradient
    accumulation as batch_size // micro_batch_size instead. Returns the
    derived value when a legacy batch size is present, otherwise the
    gradient-accumulation value unchanged.
    """
    if batch_size_val > 0:
        # Clamp both sides: a zero micro-batch (corrupt save) must not raise
        # ZeroDivisionError, and the result must never be 0 — gradient
        # accumulation below 1 is invalid for the trainer.
        gradient_acc = max(1, batch_size_val // max(1, micro_batch_size_val))
        print(f"Using Old version of Batch Size ({batch_size_val}) to set Gradient Accumulation: {gradient_acc}")
        return gradient_acc

    return grad_accumulation_val
|
269 |
+
|
270 |
+
|
271 |
+
copy_from.change(partial(do_copy_params, all_params= all_params), copy_from, all_params).then(fix_old_version,[batch_size,micro_batch_size, grad_accumulation],grad_accumulation)
|
272 |
+
start_button.click(do_train, all_params, [output,plot_graph])
|
273 |
+
stop_button.click(do_interrupt, None, None, queue=False)
|
274 |
+
higher_rank_limit.change(change_rank_limit, [higher_rank_limit], [lora_rank, lora_alpha])
|
275 |
+
|
276 |
+
def trigger_stop_at_loss(stop_at_loss_value):
    """Store the new stop-at-loss threshold; announce it if training is live."""
    non_serialized_params["stop_at_loss"] = stop_at_loss_value
    if not non_serialized_params['training_loop']:
        return
    print(f"Queue: [Stop at loss Change] to {stop_at_loss_value}")
|
280 |
+
|
281 |
+
|
282 |
+
stop_at_loss.change(trigger_stop_at_loss, stop_at_loss, None)
|
283 |
+
|
284 |
+
def trigger_save_checkpoint():
    """Queue a one-shot checkpoint save; tell the user what will happen."""
    non_serialized_params["save_checkpoint_now"] = True
    if non_serialized_params['training_loop']:
        notice = "Queue: [Save checkpoint] Checkpoint will be saved after the current step is finished."
    else:
        notice = "Use during the training to save the checkpoint at any time."
    print(notice)
|
290 |
+
|
291 |
+
|
292 |
+
def update_button():
    """Return the gradio update that shows the button as a queued checkpoint."""
    queued_state = gr.Button.update('[Checkpoint in Queue]', variant='stop', interactive=True)
    return queued_state
|
294 |
+
|
295 |
+
def update_button2():
    """After a short pause, return the gradio update restoring the button's idle look."""
    time.sleep(1.0)
    idle_state = gr.Button.update('Queue Checkpoint Now', variant='secondary', interactive=True)
    return idle_state
|
298 |
+
|
299 |
+
save_chackpoint_now.click(trigger_save_checkpoint, None, None).then(update_button, None,save_chackpoint_now).then(update_button2, None,save_chackpoint_now)
|
300 |
+
|
301 |
+
dataset_calc_params = [save_steps,micro_batch_size, epochs, cutoff_len, dataset, format, raw_text_file, warmup_steps, hard_cut_string, min_chars, precize_slicing_overlap,sliding_window,warmup_ratio,grad_accumulation]
|
302 |
+
|
303 |
+
def check_dataset(save_steps: int, micro_batch_size: int, epochs: int, cutoff_len: int, dataset: str, format: str, raw_text_file: str, warmup_steps: int, hard_cut_string: str, min_chars: int, precize_slicing_overlap: bool, sliding_window: bool, warmup_ratio: float, grad_accumulation: int):
    """Verify the selected dataset or text file and suggest training settings.

    Generator used as a gradio callback: yields human-readable status strings
    into the 'Dataset info' textbox. Counts how many training blocks the raw
    text slicer (or the JSON dataset tokenizer) would produce, then derives
    suggested checkpoint-save and warmup step counts from the current
    batch/epoch/gradient-accumulation settings.
    """
    result = "Specify JSON dataset or Text file"
    total_blocks = 0
    # A loaded model is required because block counting tokenizes the data.
    if shared.tokenizer is None:
        yield "Tokenizer is not available. Please Load some Model first."
        return

    if raw_text_file not in ['None', '']:
        logger.info("Loading Text file...")
        fullpath = clean_path('training/datasets', f'{raw_text_file}')
        fullpath = Path(fullpath)
        if fullpath.is_dir():
            # Directory mode: concatenate every *.txt inside, in natural-sort order.
            logger.info('Training path directory {}'.format(raw_text_file))
            raw_text = ""
            file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
            for file_path in file_paths:
                if file_path.is_file():
                    with file_path.open('r', encoding='utf-8') as file:
                        raw_text += file.read().replace('\r', '')

                    logger.info(f"Loaded training file: {file_path.name}")
        else:
            try:
                with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
                    raw_text = file.read().replace('\r', '')
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; file-access and decode errors mean "unusable file".
            except (OSError, UnicodeDecodeError):
                yield f"{raw_text_file}.txt doesn't seem to exist anymore... check your training/datasets folder"
                return

        if min_chars < 0:
            min_chars = 0

        # == New more precise slicing on sentence boundary ==
        if sliding_window:
            text_chunks = sliding_block_cut(raw_text, min_chars, False, cutoff_len, hard_cut_string, non_serialized_params['debug_slicer'])
        else:
            text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, False, cutoff_len, hard_cut_string, non_serialized_params['debug_slicer'])

        total_blocks = len(text_chunks)
        result = f"Text: ({raw_text_file}.txt) has {total_blocks} blocks (Block Size {cutoff_len} tokens)"
        del text_chunks

    else:
        if dataset in ['None', '']:
            yield "Select dataset or text file."
            return

        if format in ['None', '']:
            yield "Select format choice for dataset."
            return

        with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
            format_data: dict[str, str] = json.load(formatFile)

        def generate_prompt(data_point: dict[str, str]):
            # Pick the template whose comma-separated key set exactly matches
            # the non-empty string fields of this data point, then substitute
            # each %key% placeholder with its value.
            for options, data in format_data.items():
                if set(options.split(',')) == set(x[0] for x in data_point.items() if (type(x[1]) is str and len(x[1].strip()) > 0)):
                    for key, val in data_point.items():
                        if type(val) is str:
                            data = data.replace(f'%{key}%', val)
                    return data
            raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')

        def tokenize_dummy(prompt):
            # Lightweight tokenization used only for counting blocks;
            # truncates each prompt at cutoff_len tokens.
            input_ids = shared.tokenizer.encode(prompt, truncation=True, max_length=cutoff_len)
            labels = [1] * len(input_ids)
            input_ids = torch.tensor(input_ids)
            return {
                "input_ids": input_ids,
                "labels": labels,
                "attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
            }

        def generate_and_tokenize_prompt(data_point):
            prompt = generate_prompt(data_point)
            return tokenize_dummy(prompt)

        logger.info("Loading JSON datasets...")
        data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))

        data_keys = []

        if data:
            # Guard against an empty 'train' split: indexing row 0 of an empty
            # split would raise IndexError instead of reporting "empty".
            if 'train' in data and data['train'].num_rows > 0:
                data_keys = list(data['train'][0].keys())
                print("Data Keys:", data_keys)
            else:
                print("The dataset is empty.")

        # Random fingerprint forces datasets to re-run map() instead of
        # reusing a stale cached result.
        train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
        total_blocks = train_data.num_rows

        result = f"Dataset: ({dataset}.json) has {total_blocks} blocks @ length = {cutoff_len} tokens\n(Keys: {data_keys} - Format: {format}.json): "

    if total_blocks > 0:
        number_ofSteps = int(math.ceil(total_blocks / micro_batch_size) * epochs)
        num_stepsPer_epoch = int(math.ceil(number_ofSteps / epochs))
        # Baseline warmup of ~100 optimizer updates, expressed in micro-steps.
        min_warm = math.ceil(100 / grad_accumulation)

        # Suggest the smaller of the 100-update baseline and 10% of all steps,
        # and never more than one epoch's worth of steps.
        warmup_steps_suggest = min(int(min_warm * grad_accumulation), int(math.ceil(number_ofSteps * 0.1)))
        warmup_steps_suggest = min(warmup_steps_suggest, num_stepsPer_epoch)

        # Suggest saving a checkpoint every 10%-20% of the run.
        save_each_n_min = int(math.ceil(number_ofSteps / 10))
        save_each_n_max = int(math.ceil(number_ofSteps / 5))
        gradient_accumulation_max = int(total_blocks) // micro_batch_size

        result += f"\n[Batch Size: {micro_batch_size}, Epochs: {epochs}, Gradient Accumulation: {grad_accumulation}]\n"
        result += f"Total number of steps: {number_ofSteps}\n"
        result += f"Steps per each Epoch: {num_stepsPer_epoch}\n"
        result += f"Suggestions:\n"
        result += f"Checkpoints: Save every {save_each_n_min} - {save_each_n_max} steps (Current: {int(save_steps)})\n"
        result += f"Warmup steps: {warmup_steps_suggest} (Current: {int(warmup_steps)})"
        if gradient_accumulation_max < grad_accumulation:
            result += f"\n\nWARNING: Gradient Accumulation {grad_accumulation} is too high: It should be below {gradient_accumulation_max}"

    yield result
    return
|
431 |
+
|
432 |
+
# Gradio event wiring for the training tab: dataset checking, perplexity
# evaluation, LoRA list refresh, and dataset downloads.
check_dataset_btn.click(check_dataset, dataset_calc_params ,check_dataset_txt)

# Evaluation events. For some reason, the interrupt event
# doesn't work with the .then() syntax, so I write them one
# by one in this ugly but functional way.
ev = start_evaluation.click(calculate_perplexity, [models, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)

start_current_evaluation.click(lambda: ['current model'], None, tmp)
ev_cur = start_current_evaluation.click(calculate_perplexity, [tmp, evaluate_text_file, stride_length, max_length], evaluation_log, show_progress=False)
start_current_evaluation.click(generate_markdown_table, None, evaluation_table, show_progress=False)

# The cancels= list lets the stop button abort either evaluation event.
stop_evaluation.click(None, None, None, cancels=[ev, ev_cur], queue=False)
refresh_table.click(generate_markdown_table, None, evaluation_table, show_progress=True)
save_comments.click(
    save_past_evaluations, evaluation_table, None).then(
    lambda: "Comments saved.", None, evaluation_log, show_progress=False)

def reload_lora():
    """Refresh the copy-from dropdown with the current LoRA list, honoring the sort-by-time toggle."""
    return gr.Dropdown.update(choices=get_available_loras_local(non_serialized_params['Lora_sortedByTime']))

# nonserialized items

sort_byTime.change(lambda x: non_serialized_params.update({"Lora_sortedByTime": x}), sort_byTime, None).then(reload_lora,None,copy_from)
#debug_slicer.change(lambda x: non_serialized_params.update({"debug_slicer": x}), debug_slicer, None)

def update_dataset():
    """Re-scan training/datasets and return updated choices for the json and txt dropdowns."""
    return gr.update(choices=get_datasets('training/datasets', 'json')), gr.update(choices=get_datasets('training/datasets', 'txt'))

download_button.click(download_file_from_url, [download_file_url,download_check_overwrite,download_folder] , download_status).then(update_dataset,None,[dataset , raw_text_file])
|
462 |
+
|
463 |
+
def get_datasets(path: str, ext: str):
    """Return ['None'] plus the unique stems of '*.{ext}' files in *path*, naturally sorted.

    The placeholder file 'put-trainer-datasets-here' is excluded.
    """
    # include subdirectories for raw txt files to allow training from a subdirectory of txt files
    #if ext == "txt":
    #    return ['None'] + sorted(set([k.stem for k in list(Path(path).glob('txt')) + list(Path(path).glob('*/')) if k.stem != 'put-trainer-datasets-here']), key=natural_keys)

    # Set comprehension deduplicates directly instead of set([list-comprehension]).
    stems = {k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here'}
    return ['None'] + sorted(stems, key=natural_keys)
|
469 |
+
|
470 |
+
def do_interrupt():
    """Request that training stop: the Trainer callbacks poll WANT_INTERRUPT each step."""
    global WANT_INTERRUPT
    WANT_INTERRUPT = True
|
473 |
+
|
474 |
+
|
475 |
+
def do_copy_params(lora_name: str, all_params):
    """Merge the saved training parameters of *lora_name* over the current UI values.

    Loads <lora_dir>/<lora_name>/training_parameters.json when it exists;
    returns one value per PARAMETERS entry - the saved value when present,
    otherwise the corresponding current value from *all_params*.
    """
    params = {}
    if lora_name:
        f_name = f"{shared.args.lora_dir}/{clean_path(None, lora_name)}/training_parameters.json"
        if Path(f_name).is_file():
            with open(f_name, 'r', encoding='utf-8') as format_file:
                params = json.load(format_file)

    # Saved value wins; fall back to the live UI value at the same position.
    return [params.get(key, all_params[i]) for i, key in enumerate(PARAMETERS)]
|
496 |
+
|
497 |
+
|
498 |
+
def change_rank_limit(use_higher_ranks: bool):
    """Return gradio update dicts doubling the rank/alpha slider maxima when enabled."""
    factor = 2 if use_higher_ranks else 1
    rank_update = {"maximum": 1024 * factor, "__type__": "update"}
    alpha_update = {"maximum": 2048 * factor, "__type__": "update"}
    return rank_update, alpha_update
|
501 |
+
|
502 |
+
|
503 |
+
def clean_path(base_path: str, path: str):
    """Strips unusual symbols and forcibly builds a path as relative to the intended directory."""
    # Normalize separators and neutralize '..' traversal segments.
    # NOTE(review): this is a light sanitizer, not a full path-traversal defense.
    sanitized = path.replace('\\', '/').replace('..', '_')
    if base_path is None:
        return sanitized
    return f'{Path(base_path).absolute()}/{sanitized}'
|
510 |
+
|
511 |
+
|
512 |
+
def backup_adapter(input_folder):
    """Copy an existing LoRA adapter's top-level files into a dated backup subfolder.

    Best-effort: any failure is printed and swallowed so training can proceed.
    """
    # Get the creation date of the file adapter_model.bin
    try:
        adapter_file = Path(f"{input_folder}/adapter_model.bin")
        if adapter_file.is_file():

            logger.info("Backing up existing LoRA adapter...")
            # Backup folder is named by the adapter's creation day, e.g. "Backup-2024-01-31"
            creation_date = datetime.fromtimestamp(adapter_file.stat().st_ctime)
            creation_date_str = creation_date.strftime("Backup-%Y-%m-%d")

            # Create the new subfolder
            subfolder_path = Path(f"{input_folder}/{creation_date_str}")
            subfolder_path.mkdir(parents=True, exist_ok=True)

            # Check if the file already exists in the subfolder
            backup_adapter_file = Path(f"{input_folder}/{creation_date_str}/adapter_model.bin")
            if backup_adapter_file.is_file():
                print(" - Backup already exists. Skipping backup process.")
                return

            # Copy existing files to the new subfolder
            # (only top-level regular files; subdirectories are skipped)
            existing_files = Path(input_folder).iterdir()
            for file in existing_files:
                if file.is_file():
                    shutil.copy2(file, subfolder_path)
    except Exception as e:
        # Deliberate broad catch: a failed backup must not abort training.
        print("An error occurred in backup_adapter:", str(e))
|
539 |
+
|
540 |
+
|
541 |
+
def calc_trainable_parameters(model):
    """Return (trainable, total) parameter counts for *model*.

    Honors DeepSpeed ZeRO-3, where weights may be materialized empty and the
    true element count lives in the ds_numel attribute.
    """
    trainable_count = 0
    total_count = 0
    for _, parameter in model.named_parameters():
        count = parameter.numel()
        # if using DS Zero 3 and the weights are initialized empty
        if count == 0 and hasattr(parameter, "ds_numel"):
            count = parameter.ds_numel

        total_count += count
        if parameter.requires_grad:
            trainable_count += count

    return trainable_count, total_count
|
555 |
+
|
556 |
+
|
557 |
+
|
558 |
+
def do_train(lora_name: str, always_override: bool, save_steps: int, micro_batch_size: int, batch_size: int, epochs: int, learning_rate: str, lr_scheduler_type: str, lora_rank: int, lora_alpha: int, lora_dropout: float, cutoff_len: int, dataset: str, eval_dataset: str, format: str, eval_steps: int, raw_text_file: str, higher_rank_limit: bool, warmup_steps: int, optimizer: str, hard_cut_string: str, train_only_after: str, stop_at_loss: float, add_eos_token: bool, min_chars: int, report_to: str, precize_slicing_overlap: bool, add_eos_token_type: str, save_steps_under_loss: float, add_bos_token: bool, training_projection: str,sliding_window:bool,warmup_ratio:float, grad_accumulation: int,neft_noise_alpha:float):
|
559 |
+
|
560 |
+
if shared.args.monkey_patch:
|
561 |
+
from alpaca_lora_4bit.monkeypatch.peft_tuners_lora_monkey_patch import (
|
562 |
+
replace_peft_model_with_int4_lora_model
|
563 |
+
)
|
564 |
+
replace_peft_model_with_int4_lora_model()
|
565 |
+
|
566 |
+
global train_log_graph
|
567 |
+
global WANT_INTERRUPT
|
568 |
+
WANT_INTERRUPT = False
|
569 |
+
|
570 |
+
statistics['loss'] = []
|
571 |
+
|
572 |
+
statistics['loss'].append({'epoch': 0, 'value': 0})
|
573 |
+
zero_pd = pd.DataFrame(statistics['loss'])
|
574 |
+
|
575 |
+
# == Input validation / processing ==
|
576 |
+
yield "Preparing the input...", zero_pd
|
577 |
+
lora_file_path = clean_path(None, lora_name)
|
578 |
+
if lora_file_path.strip() == '':
|
579 |
+
yield "Missing or invalid LoRA file name input.", zero_pd
|
580 |
+
return
|
581 |
+
|
582 |
+
lora_file_path = f"{Path(shared.args.lora_dir)}/{lora_file_path}"
|
583 |
+
actual_lr = float(learning_rate)
|
584 |
+
model_type = type(shared.model).__name__
|
585 |
+
|
586 |
+
if model_type in MODEL_CLASSES:
|
587 |
+
model_id = MODEL_CLASSES[model_type]
|
588 |
+
else:
|
589 |
+
model_id = "llama"
|
590 |
+
if model_type == "PeftModelForCausalLM":
|
591 |
+
if len(shared.lora_names) > 0:
|
592 |
+
yield "You are trying to train a LoRA while you already have another LoRA loaded. This will work, but may have unexpected effects. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
|
593 |
+
logger.warning("Training LoRA over top of another LoRA. May have unexpected effects.")
|
594 |
+
else:
|
595 |
+
yield "Model ID not matched due to LoRA loading. Consider reloading base model. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
|
596 |
+
logger.warning("Model ID not matched due to LoRA loading. Consider reloading base model.")
|
597 |
+
else:
|
598 |
+
yield "LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. Unexpected errors may follow. *(Will continue anyway in 5 seconds, press `Interrupt` to stop.)*", zero_pd
|
599 |
+
logger.warning(f"LoRA training has only currently been validated for LLaMA, OPT, GPT-J, and GPT-NeoX models. (Found model type: {model_type})")
|
600 |
+
|
601 |
+
time.sleep(5)
|
602 |
+
|
603 |
+
if shared.args.loader == 'GPTQ-for-LLaMa' and not shared.args.monkey_patch:
|
604 |
+
yield "LoRA training with GPTQ-for-LLaMa requires loading with `--monkey-patch`", zero_pd
|
605 |
+
return
|
606 |
+
|
607 |
+
if cutoff_len <= 0 or micro_batch_size <= 0 or actual_lr <= 0 or lora_rank <= 0 or lora_alpha <= 0:
|
608 |
+
yield "Cannot input zeroes.", zero_pd
|
609 |
+
return
|
610 |
+
|
611 |
+
#in new version we dumped this in favor of grad_accumulation
|
612 |
+
#set it to zero fo new save
|
613 |
+
batch_size = 0
|
614 |
+
|
615 |
+
gradient_accumulation_steps = grad_accumulation #batch_size // micro_batch_size
|
616 |
+
shared.tokenizer.pad_token_id = 0
|
617 |
+
shared.tokenizer.padding_side = "left"
|
618 |
+
|
619 |
+
def encode(text, prepend_bos_token):
    """Tokenize *text* truncated to cutoff_len, normalizing the leading BOS token.

    A doubled BOS (emitted by some tokenizers) is collapsed to one, and the
    remaining BOS is stripped when prepend_bos_token is False.
    """
    result = shared.tokenizer.encode(text, truncation=True, max_length=cutoff_len)
    # Check if the first two tokens are BOS
    if len(result) >= 2 and result[:2] == [shared.tokenizer.bos_token_id, shared.tokenizer.bos_token_id]:
        result = result[1:]

    # Guard against an empty encoding before peeking at result[0]
    if not prepend_bos_token and result and result[0] == shared.tokenizer.bos_token_id:
        result = result[1:]
    return result
|
629 |
+
|
630 |
+
def tokenize(prompt, append_eos_token=False, prepend_bos_token = False):
    """Tokenize *prompt* to exactly cutoff_len tokens, left-padded.

    When train_only_after is set and occurs in the prompt, tokens up to and
    including the marker get label -100 so the loss is only computed on the
    text that follows it; otherwise every token is labeled trainable (1).
    """
    if train_only_after == '' or train_only_after not in prompt:
        input_ids = encode(prompt, prepend_bos_token)

        # Append EOS only when there is still room under cutoff_len
        if append_eos_token and input_ids[-1] != shared.tokenizer.eos_token_id and len(input_ids) < cutoff_len:
            input_ids.append(shared.tokenizer.eos_token_id)

        # Left-pad to the fixed block length
        input_ids = [shared.tokenizer.pad_token_id] * (cutoff_len - len(input_ids)) + input_ids

        labels = [1] * len(input_ids)
    else:
        # Split just after the train_only_after marker; train only on the tail
        ind = prompt.index(train_only_after) + len(train_only_after)
        before_tokens = encode(prompt[:ind], prepend_bos_token)
        after_tokens = encode(prompt[ind:], False)

        if append_eos_token and after_tokens[-1] != shared.tokenizer.eos_token_id:
            after_tokens.append(shared.tokenizer.eos_token_id)

        full_length = len(after_tokens) + len(before_tokens)
        if full_length > cutoff_len:
            # Too long: truncate the trainable tail so the block fits
            after_tokens = after_tokens[:cutoff_len - len(before_tokens)]
        else:
            # Too short: left-pad the masked head up to cutoff_len
            before_tokens = [shared.tokenizer.pad_token_id] * (cutoff_len - full_length) + before_tokens

        input_ids = before_tokens + after_tokens
        # -100 hides the prefix from the loss; 1 marks trainable positions
        labels = [-100] * len(before_tokens) + [1] * len(after_tokens)

    input_ids = torch.tensor(input_ids)
    return {
        "input_ids": input_ids,
        "labels": labels,
        "attention_mask": input_ids.ne(shared.tokenizer.pad_token_id),
    }
|
664 |
+
|
665 |
+
train_template.clear()
|
666 |
+
|
667 |
+
#reset stuff
|
668 |
+
print(f"*** LoRA: {lora_name} ***")
|
669 |
+
non_serialized_params.update({"stop_at_loss": stop_at_loss})
|
670 |
+
non_serialized_params.update({"save_steps_under_loss": save_steps_under_loss+0.01})
|
671 |
+
non_serialized_params.update({"save_checkpoint_now": False})
|
672 |
+
non_serialized_params.update({"training_loop": False})
|
673 |
+
non_serialized_params.update({"current_stability": 0})
|
674 |
+
non_serialized_params.update({"save_epochs": 0})
|
675 |
+
non_serialized_params.update({"checkpoint_offset": 0})
|
676 |
+
non_serialized_params.update({"epoch_offset": 0})
|
677 |
+
train_log_graph.clear()
|
678 |
+
|
679 |
+
# == Prep the dataset, format, etc ==
|
680 |
+
if raw_text_file not in ['None', '']:
|
681 |
+
train_template["template_type"] = "raw_text"
|
682 |
+
logger.info("Loading text file...")
|
683 |
+
fullpath = clean_path('training/datasets', f'{raw_text_file}')
|
684 |
+
fullpath = Path(fullpath)
|
685 |
+
if fullpath.is_dir():
|
686 |
+
logger.info('Training path directory {}'.format(raw_text_file))
|
687 |
+
raw_text = ""
|
688 |
+
file_paths = sorted(fullpath.glob('*.txt'), key=lambda path: natural_keys(path.name))
|
689 |
+
for file_path in file_paths:
|
690 |
+
if file_path.is_file():
|
691 |
+
with file_path.open('r', encoding='utf-8') as file:
|
692 |
+
raw_text += file.read().replace('\r', '')
|
693 |
+
|
694 |
+
logger.info(f"Loaded training file: {file_path.name}")
|
695 |
+
else:
|
696 |
+
with open(clean_path('training/datasets', f'{raw_text_file}.txt'), 'r', encoding='utf-8') as file:
|
697 |
+
raw_text = file.read().replace('\r', '')
|
698 |
+
|
699 |
+
# FPHAM PRECISE SLICING
|
700 |
+
if min_chars<0:
|
701 |
+
min_chars = 0
|
702 |
+
|
703 |
+
add_EOS_to_all = add_eos_token and add_eos_token_type == 'Every Block'
|
704 |
+
add_EOS_to_HC = add_eos_token and add_eos_token_type != 'Every Block'
|
705 |
+
|
706 |
+
#print (f"add_eos_token {add_eos_token}, add_EOS_to_all {add_EOS_to_all}, add_EOS_to_HC {add_EOS_to_HC}")
|
707 |
+
|
708 |
+
# == New more precise slicing on sentence boundary ==
|
709 |
+
if sliding_window:
|
710 |
+
text_chunks = sliding_block_cut(raw_text, min_chars, add_EOS_to_HC, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
|
711 |
+
else:
|
712 |
+
text_chunks = precise_cut(raw_text, precize_slicing_overlap, min_chars, add_EOS_to_HC, cutoff_len, hard_cut_string,non_serialized_params['debug_slicer'])
|
713 |
+
|
714 |
+
train_data = Dataset.from_list([tokenize(x, add_EOS_to_all, add_bos_token) for x in text_chunks])
|
715 |
+
if add_EOS_to_all:
|
716 |
+
print(f"Added EOS to {len(text_chunks)} blocks")
|
717 |
+
|
718 |
+
print(f"All Data Blocks: {len(text_chunks)}")
|
719 |
+
|
720 |
+
del text_chunks
|
721 |
+
eval_data = None
|
722 |
+
else:
|
723 |
+
if dataset in ['None', '']:
|
724 |
+
yield "Missing dataset choice input, cannot continue.", zero_pd
|
725 |
+
return
|
726 |
+
|
727 |
+
if format in ['None', '']:
|
728 |
+
yield "Missing format choice input, cannot continue.", zero_pd
|
729 |
+
return
|
730 |
+
|
731 |
+
train_template["template_type"] = "dataset"
|
732 |
+
|
733 |
+
with open(clean_path('training/formats', f'{format}.json'), 'r', encoding='utf-8-sig') as formatFile:
|
734 |
+
format_data: dict[str, str] = json.load(formatFile)
|
735 |
+
|
736 |
+
# == store training prompt ==
|
737 |
+
for _, value in format_data.items():
|
738 |
+
prompt_key = f"template_{len(train_template)}"
|
739 |
+
train_template[prompt_key] = value
|
740 |
+
|
741 |
+
def generate_prompt(data_point: dict[str, str]):
    """Fill the first format template whose option-key set matches the
    non-empty string fields of *data_point*, substituting %key% markers.

    Raises RuntimeError when no template in format_data matches.
    """
    filled_keys = set(
        key for key, value in data_point.items()
        if type(value) is str and len(value.strip()) > 0
    )
    for options, template in format_data.items():
        if set(options.split(',')) != filled_keys:
            continue
        rendered = template
        for key, value in data_point.items():
            if type(value) is str:
                rendered = rendered.replace(f'%{key}%', value)
        return rendered
    raise RuntimeError(f'Data-point "{data_point}" has no keyset match within format "{list(format_data.keys())}"')
|
749 |
+
|
750 |
+
def generate_and_tokenize_prompt(data_point):
    """Render *data_point* with the matching format template, then tokenize it for training."""
    return tokenize(generate_prompt(data_point), add_eos_token, add_bos_token)
|
753 |
+
|
754 |
+
logger.info("Loading JSON datasets...")
|
755 |
+
data = load_dataset("json", data_files=clean_path('training/datasets', f'{dataset}.json'))
|
756 |
+
train_data = data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
|
757 |
+
|
758 |
+
print(f"BOS: {add_bos_token} EOS: {add_eos_token}")
|
759 |
+
print(f"Data Blocks: {train_data.num_rows}")
|
760 |
+
|
761 |
+
if eval_dataset == 'None':
|
762 |
+
eval_data = None
|
763 |
+
else:
|
764 |
+
eval_data = load_dataset("json", data_files=clean_path('training/datasets', f'{eval_dataset}.json'))
|
765 |
+
eval_data = eval_data['train'].map(generate_and_tokenize_prompt, new_fingerprint='%030x' % random.randrange(16**30))
|
766 |
+
|
767 |
+
# == We MUST reload model if it went through any previous training, even failed one ==
|
768 |
+
if shared.model_dirty_from_training:
|
769 |
+
selected_model = shared.model_name
|
770 |
+
if selected_model:
|
771 |
+
print("\033[1;31;1m(Model has been modified by previous training, it needs to be reloaded...)\033[0;37;0m")
|
772 |
+
try:
|
773 |
+
yield f"Reloading {selected_model}...", zero_pd
|
774 |
+
reload_model()
|
775 |
+
shared.tokenizer.pad_token_id = 0
|
776 |
+
shared.tokenizer.padding_side = "left"
|
777 |
+
|
778 |
+
if shared.model is not None:
|
779 |
+
print("Model reloaded OK, continue with training.")
|
780 |
+
else:
|
781 |
+
return f"Failed to load {selected_model}."
|
782 |
+
except:
|
783 |
+
exc = traceback.format_exc()
|
784 |
+
logger.error('Failed to reload the model.')
|
785 |
+
print(exc)
|
786 |
+
return exc.replace('\n', '\n\n')
|
787 |
+
|
788 |
+
# == Start prepping the model itself ==
|
789 |
+
if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
|
790 |
+
logger.info("Getting model ready...")
|
791 |
+
# here we can disable gradient checkpoint, by default = true, use_gradient_checkpointing=True
|
792 |
+
prepare_model_for_kbit_training(shared.model)
|
793 |
+
|
794 |
+
# base model is now frozen and should not be reused for any other LoRA training than this one
|
795 |
+
shared.model_dirty_from_training = True
|
796 |
+
print(f"Transformers Model Type: {YELLOW}{model_type}{RESET}")
|
797 |
+
|
798 |
+
if training_projection==train_choices[0]:
|
799 |
+
model_to_lora_modules[model_id] = ["gate_proj","down_proj","up_proj","q_proj","k_proj","v_proj","o_proj"]
|
800 |
+
elif training_projection==train_choices[1]:
|
801 |
+
model_to_lora_modules[model_id] = ["q_proj","k_proj", "v_proj", "o_proj"]
|
802 |
+
elif training_projection==train_choices[2]:
|
803 |
+
model_to_lora_modules[model_id] = ["q_proj","k_proj", "v_proj"]
|
804 |
+
elif training_projection==train_choices[3]:
|
805 |
+
model_to_lora_modules[model_id] = ["k_proj", "v_proj", "down_proj"]
|
806 |
+
else:
|
807 |
+
model_to_lora_modules[model_id] = ["q_proj", "v_proj"]
|
808 |
+
|
809 |
+
|
810 |
+
logger.info("Preparing for training...")
|
811 |
+
config = LoraConfig(
|
812 |
+
r=lora_rank,
|
813 |
+
lora_alpha=lora_alpha,
|
814 |
+
target_modules=model_to_lora_modules[model_id],
|
815 |
+
lora_dropout=lora_dropout,
|
816 |
+
bias="none",
|
817 |
+
task_type="CAUSAL_LM"
|
818 |
+
)
|
819 |
+
|
820 |
+
# == Backup the existing adapter ==
|
821 |
+
if not always_override:
|
822 |
+
backup_adapter(lora_file_path)
|
823 |
+
|
824 |
+
# == get model trainable params
|
825 |
+
model_trainable_params, model_all_params = calc_trainable_parameters(shared.model)
|
826 |
+
|
827 |
+
try:
|
828 |
+
logger.info("Creating LoRA model...")
|
829 |
+
lora_model = get_peft_model(shared.model, config)
|
830 |
+
if not always_override and Path(f"{lora_file_path}/adapter_model.bin").is_file():
|
831 |
+
logger.info("Loading existing LoRA data...")
|
832 |
+
state_dict_peft = torch.load(f"{lora_file_path}/adapter_model.bin")
|
833 |
+
set_peft_model_state_dict(lora_model, state_dict_peft)
|
834 |
+
|
835 |
+
print(f" + Continue Training on {RED}{lora_file_path}/adapter_model.bin{RESET}")
|
836 |
+
|
837 |
+
#load training_log.json if exist
|
838 |
+
|
839 |
+
if Path(f"{lora_file_path}/training_log.json").is_file():
|
840 |
+
with open(f"{lora_file_path}/training_log.json", 'r') as json_file:
|
841 |
+
json_ilog = json.load(json_file)
|
842 |
+
for key, value in json_ilog.items():
|
843 |
+
if key=='current_steps':
|
844 |
+
non_serialized_params.update({"checkpoint_offset": int(value+1)})
|
845 |
+
print(f" + Checkpoints will be saved with offset: {RED}{non_serialized_params['checkpoint_offset']}{RESET}")
|
846 |
+
if key=='epoch':
|
847 |
+
non_serialized_params.update({"epoch_offset": value})
|
848 |
+
print(f" + Epoch offset: {RED}{non_serialized_params['epoch_offset']}{RESET}")
|
849 |
+
|
850 |
+
|
851 |
+
if Path(f"{lora_file_path}/training_graph.json").is_file():
|
852 |
+
try:
|
853 |
+
with open(f"{lora_file_path}/training_graph.json", 'r') as json_file:
|
854 |
+
train_log_graph = json.load(json_file)
|
855 |
+
print(" + Training Graph loaded")
|
856 |
+
except:
|
857 |
+
print(f"Can't read training_graph")
|
858 |
+
|
859 |
+
|
860 |
+
except:
|
861 |
+
yield traceback.format_exc().replace('\n', '\n\n'), zero_pd
|
862 |
+
return
|
863 |
+
|
864 |
+
if shared.args.monkey_patch:
|
865 |
+
from alpaca_lora_4bit.autograd_4bit import Autograd4bitQuantLinear
|
866 |
+
from alpaca_lora_4bit.models import Linear4bitLt
|
867 |
+
for _, m in lora_model.named_modules():
|
868 |
+
if isinstance(m, Autograd4bitQuantLinear) or isinstance(m, Linear4bitLt):
|
869 |
+
if m.is_v1_model:
|
870 |
+
m.zeros = m.zeros.half()
|
871 |
+
m.scales = m.scales.half()
|
872 |
+
|
873 |
+
class Tracked():
    """Mutable progress counters shared between the Trainer callbacks and the outer loop."""
    def __init__(self):
        # Steps seen so far; scaled by gradient accumulation in the callbacks
        self.current_steps = 0
        # Total expected steps, set from state.max_steps once training starts
        self.max_steps = 0
        # Whether a save has occurred (not updated in the visible code - TODO confirm usage)
        self.did_save = False
|
878 |
+
|
879 |
+
tracked = Tracked()
|
880 |
+
actual_save_steps = math.ceil(save_steps / gradient_accumulation_steps)
|
881 |
+
|
882 |
+
class Callbacks(transformers.TrainerCallback):
|
883 |
+
def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
|
884 |
+
tracked.current_steps = state.global_step * gradient_accumulation_steps
|
885 |
+
tracked.max_steps = state.max_steps * gradient_accumulation_steps
|
886 |
+
ssteps10 = int(max(2,(state.max_steps/epochs)*0.1))
|
887 |
+
|
888 |
+
if WANT_INTERRUPT:
|
889 |
+
control.should_epoch_stop = True
|
890 |
+
control.should_training_stop = True
|
891 |
+
else:
|
892 |
+
current_loss = float(train_log.get('loss', 0.0))
|
893 |
+
current_epoch_int = int(float(train_log.get('epoch', 0.0)))
|
894 |
+
|
895 |
+
force_save = False
|
896 |
+
|
897 |
+
current_steps_offset = tracked.current_steps + non_serialized_params['checkpoint_offset']
|
898 |
+
|
899 |
+
folder_save = f"checkpoint-{current_steps_offset}"
|
900 |
+
|
901 |
+
# save if triggered by user
|
902 |
+
if non_serialized_params['save_checkpoint_now']:
|
903 |
+
force_save = True
|
904 |
+
non_serialized_params.update({"save_checkpoint_now": False})
|
905 |
+
print(f"\033[1;31;1mSave Checkpoint manually trigerred.\033[0;37;0m")
|
906 |
+
folder_save = f"checkpoint-{current_steps_offset}-user"
|
907 |
+
|
908 |
+
patience = 3 # Set the number of consecutive steps for tracking stability
|
909 |
+
|
910 |
+
if gradient_accumulation_steps==1:
|
911 |
+
patience = 4
|
912 |
+
|
913 |
+
min_steps = ssteps10
|
914 |
+
|
915 |
+
# Save each time the loss is below the threshold
|
916 |
+
if current_loss < non_serialized_params['save_steps_under_loss'] and current_loss > 0 and state.global_step > min_steps:
|
917 |
+
current_stability = non_serialized_params['current_stability']
|
918 |
+
current_stability += 1
|
919 |
+
non_serialized_params.update({"current_stability": current_stability})
|
920 |
+
|
921 |
+
if current_stability >= patience:
|
922 |
+
current_stability = 0
|
923 |
+
non_serialized_params.update({"current_stability": current_stability})
|
924 |
+
current_loss_dec = round(current_loss, 2)
|
925 |
+
loss_str = f"{current_loss_dec:.2f}"
|
926 |
+
loss_str = loss_str.replace('.', '_')
|
927 |
+
new_save = (current_loss_dec-0.1) + 0.01
|
928 |
+
non_serialized_params.update({"save_steps_under_loss": new_save})
|
929 |
+
|
930 |
+
folder_save = f"checkpoint-{current_steps_offset}-loss-{loss_str}"
|
931 |
+
force_save = True
|
932 |
+
|
933 |
+
|
934 |
+
else:
|
935 |
+
# Reset stability if the loss goes above the threshold
|
936 |
+
non_serialized_params.update({"current_stability": 0})
|
937 |
+
|
938 |
+
# Save full epochs
|
939 |
+
if actual_save_steps>0 and current_epoch_int > non_serialized_params['save_epochs'] and state.global_step > min_steps:
|
940 |
+
|
941 |
+
|
942 |
+
current_epoch_offset = current_epoch_int
|
943 |
+
|
944 |
+
if non_serialized_params['epoch_offset'] > 0:
|
945 |
+
current_epoch_offset = current_epoch_int + round(non_serialized_params['epoch_offset'], 2)
|
946 |
+
|
947 |
+
ep_off_str = f"{current_epoch_offset}"
|
948 |
+
ep_off_str = ep_off_str.replace('.', '_')
|
949 |
+
folder_save = f"checkpoint-{current_steps_offset}-epoch-{ep_off_str}"
|
950 |
+
|
951 |
+
non_serialized_params.update({"save_epochs": current_epoch_int})
|
952 |
+
force_save = True
|
953 |
+
|
954 |
+
# save each actual_save_steps
|
955 |
+
if state.global_step > 0 and actual_save_steps > 0 and state.global_step % actual_save_steps == 0:
|
956 |
+
folder_save = f"checkpoint-{current_steps_offset}"
|
957 |
+
force_save = True
|
958 |
+
|
959 |
+
if force_save:
|
960 |
+
lora_model.save_pretrained(f"{lora_file_path}/{folder_save}/", safe_serialization = non_serialized_params['safe_serialization'])
|
961 |
+
print(f"\033[1;30;40mStep: {tracked.current_steps:6} \033[0;37;0m Saved: [{folder_save}]")
|
962 |
+
# Save log
|
963 |
+
with open(f"{lora_file_path}/{folder_save}/training_log.json", 'w', encoding='utf-8') as file:
|
964 |
+
json.dump(train_log, file, indent=2)
|
965 |
+
# == Save training prompt ==
|
966 |
+
with open(f"{lora_file_path}/{folder_save}/training_prompt.json", 'w', encoding='utf-8') as file:
|
967 |
+
json.dump(train_template, file, indent=2)
|
968 |
+
|
969 |
+
|
970 |
+
def on_substep_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
|
971 |
+
tracked.current_steps += 1
|
972 |
+
if WANT_INTERRUPT:
|
973 |
+
control.should_epoch_stop = True
|
974 |
+
control.should_training_stop = True
|
975 |
+
|
976 |
+
def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, logs, **kwargs):
|
977 |
+
train_log.update(logs)
|
978 |
+
|
979 |
+
current_steps_offset = tracked.current_steps + non_serialized_params['checkpoint_offset']
|
980 |
+
current_epoch_offset = train_log.get('epoch', 0.0) + non_serialized_params['epoch_offset']
|
981 |
+
|
982 |
+
train_log.update({"current_steps": tracked.current_steps})
|
983 |
+
train_log.update({"current_steps_adjusted": current_steps_offset})
|
984 |
+
train_log.update({"epoch_adjusted": current_epoch_offset})
|
985 |
+
|
986 |
+
if WANT_INTERRUPT:
|
987 |
+
print("\033[1;31;1mInterrupted by user\033[0;37;0m")
|
988 |
+
|
989 |
+
if non_serialized_params['checkpoint_offset']>0:
|
990 |
+
print(f"\033[1;30;40mStep: {tracked.current_steps:6} [+{non_serialized_params['checkpoint_offset']}] \033[0;37;0m", end='')
|
991 |
+
else:
|
992 |
+
print(f"\033[1;30;40mStep: {tracked.current_steps:6} \033[0;37;0m", end='')
|
993 |
+
|
994 |
+
graphentry = {
|
995 |
+
'current_steps': int(train_log.get('current_steps_adjusted',0)),
|
996 |
+
'loss': float(train_log.get('loss', 0.0)),
|
997 |
+
'learning_rate': float(train_log.get('learning_rate', 0.0)),
|
998 |
+
'epoch': float(train_log.get('epoch_adjusted', 0.0))
|
999 |
+
}
|
1000 |
+
|
1001 |
+
cur_loss = float(train_log.get('loss', 0.0))
|
1002 |
+
cur_lr = float(train_log.get('learning_rate', 0.0))
|
1003 |
+
cur_epoch = float(train_log.get('epoch', 0.0))
|
1004 |
+
|
1005 |
+
if len(statistics['loss']) == 1:
|
1006 |
+
first_epoch = statistics['loss'][0]['epoch']
|
1007 |
+
first_value = statistics['loss'][0]['value']
|
1008 |
+
if first_value ==0:
|
1009 |
+
statistics['loss'] = []
|
1010 |
+
|
1011 |
+
|
1012 |
+
statistics['loss'].append({'epoch': cur_epoch, 'value': cur_loss})
|
1013 |
+
statistics['lr'].append({'epoch': cur_epoch, 'value': cur_lr})
|
1014 |
+
|
1015 |
+
# Add the entry to the continuous log
|
1016 |
+
train_log_graph.append(graphentry)
|
1017 |
+
|
1018 |
+
# Save the graph log for now, we can later generate full graph
|
1019 |
+
with open(f"{lora_file_path}/training_graph.json", 'w') as file:
|
1020 |
+
json.dump(train_log_graph, file, indent=4)
|
1021 |
+
|
1022 |
+
if 'loss' in logs:
|
1023 |
+
loss = float(logs['loss'])
|
1024 |
+
if loss <= stop_at_loss:
|
1025 |
+
control.should_epoch_stop = True
|
1026 |
+
control.should_training_stop = True
|
1027 |
+
print(f"{RED}Stop Loss {stop_at_loss} reached.{RESET}")
|
1028 |
+
|
1029 |
+
# FPHAM SAMPLE REQ Transformers error handling
|
1030 |
+
gradient_accumulation_max = int(train_data.num_rows)//micro_batch_size
|
1031 |
+
|
1032 |
+
if gradient_accumulation_max < gradient_accumulation_steps:
|
1033 |
+
print(f"{RED}WARNING:{RESET} Current gradient accumulation is {RED}too high{RESET} for the amount of training data.")
|
1034 |
+
print(f"Gradient accumulation: {gradient_accumulation_steps} should be less than: {gradient_accumulation_max}. {RED}This could crash Accelerate/Transformers{RESET}")
|
1035 |
+
#min_batchSize = sample_req*micro_batch_size
|
1036 |
+
print(f"Preferable fix: {RED}Increase the size of dataset{RESET}")
|
1037 |
+
print(f"... or Decrerase Gradient Accumulation {RED}{gradient_accumulation_steps}{RESET} to below {GREEN}{gradient_accumulation_max}{RESET}")
|
1038 |
+
gradient_accumulation_steps = max(1,gradient_accumulation_max-1)
|
1039 |
+
print(f"Last resort fix for this run: Lowering Gradient accumulation to {GREEN}{gradient_accumulation_steps}{RESET} [Good luck]")
|
1040 |
+
|
1041 |
+
else:
|
1042 |
+
print(f"Data Size Check: Gradient accumulation: {YELLOW}{gradient_accumulation_steps}{RESET} <= Blocks/Batch {gradient_accumulation_max} ... {GREEN}[OK]{RESET}")
|
1043 |
+
|
1044 |
+
#END OF FPHAM SAMPLE REQ
|
1045 |
+
|
1046 |
+
# FPHAM Custom Scheduler ==
|
1047 |
+
custom_scheduller = False
|
1048 |
+
lr_scheduler_type_arg = lr_scheduler_type
|
1049 |
+
|
1050 |
+
if lr_scheduler_type == 'FP_low_epoch_annealing':
|
1051 |
+
custom_scheduller = True
|
1052 |
+
lr_scheduler_type_arg = 'cosine'
|
1053 |
+
elif lr_scheduler_type == 'FP_half_time_annealing':
|
1054 |
+
custom_scheduller = True
|
1055 |
+
lr_scheduler_type_arg = 'constant'
|
1056 |
+
elif lr_scheduler_type =='FP_raise_fall_creative':
|
1057 |
+
custom_scheduller = True
|
1058 |
+
lr_scheduler_type_arg = 'constant_with_warmup'
|
1059 |
+
|
1060 |
+
#gradient_checkpointing=True
|
1061 |
+
|
1062 |
+
args=transformers.TrainingArguments(
|
1063 |
+
report_to=report_to if report_to != "None" else None,
|
1064 |
+
per_device_train_batch_size=micro_batch_size,
|
1065 |
+
gradient_accumulation_steps=gradient_accumulation_steps,
|
1066 |
+
warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
|
1067 |
+
warmup_ratio = warmup_ratio,
|
1068 |
+
num_train_epochs=epochs,
|
1069 |
+
learning_rate=actual_lr,
|
1070 |
+
fp16=False if shared.args.cpu else True,
|
1071 |
+
optim=optimizer,
|
1072 |
+
logging_steps=1,
|
1073 |
+
evaluation_strategy="steps" if eval_data is not None else "no",
|
1074 |
+
eval_steps=math.ceil(eval_steps / gradient_accumulation_steps) if eval_data is not None else None,
|
1075 |
+
save_strategy="steps" if eval_data is not None else "no",
|
1076 |
+
output_dir=lora_file_path,
|
1077 |
+
lr_scheduler_type=lr_scheduler_type_arg,
|
1078 |
+
load_best_model_at_end=eval_data is not None,
|
1079 |
+
# TODO: Enable multi-device support
|
1080 |
+
ddp_find_unused_parameters=None,
|
1081 |
+
no_cuda=shared.args.cpu,
|
1082 |
+
)
|
1083 |
+
|
1084 |
+
if custom_scheduller:
|
1085 |
+
trainer = FPSchedulerTrainer(
|
1086 |
+
neftune_noise_alpha=neft_noise_alpha,
|
1087 |
+
model=lora_model,
|
1088 |
+
train_dataset=train_data,
|
1089 |
+
eval_dataset=eval_data,
|
1090 |
+
args=args,
|
1091 |
+
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
|
1092 |
+
callbacks=list([Callbacks()])
|
1093 |
+
)
|
1094 |
+
elif neft_noise_alpha > 0:
|
1095 |
+
trainer = FPNEFtuneTrainer(
|
1096 |
+
neftune_noise_alpha=neft_noise_alpha,
|
1097 |
+
model=lora_model,
|
1098 |
+
train_dataset=train_data,
|
1099 |
+
eval_dataset=eval_data,
|
1100 |
+
args=args,
|
1101 |
+
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
|
1102 |
+
callbacks=list([Callbacks()])
|
1103 |
+
)
|
1104 |
+
else:
|
1105 |
+
trainer = transformers.Trainer(
|
1106 |
+
model=lora_model,
|
1107 |
+
train_dataset=train_data,
|
1108 |
+
eval_dataset=eval_data,
|
1109 |
+
args=args,
|
1110 |
+
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
|
1111 |
+
callbacks=list([Callbacks()])
|
1112 |
+
)
|
1113 |
+
|
1114 |
+
# END OF FPHAM CUSTOM SCHEDULER
|
1115 |
+
|
1116 |
+
lora_model.config.use_cache = False
|
1117 |
+
|
1118 |
+
if torch.__version__ >= "2" and sys.platform != "win32":
|
1119 |
+
lora_model = torch.compile(lora_model)
|
1120 |
+
|
1121 |
+
# == Save parameters for reuse ==
|
1122 |
+
with open(f"{lora_file_path}/training_parameters.json", 'w', encoding='utf-8') as file:
|
1123 |
+
vars = locals()
|
1124 |
+
json.dump({x: vars[x] for x in PARAMETERS}, file, indent=2)
|
1125 |
+
|
1126 |
+
# == Save training prompt ==
|
1127 |
+
with open(f"{lora_file_path}/training_prompt.json", 'w', encoding='utf-8') as file:
|
1128 |
+
json.dump(train_template, file, indent=2)
|
1129 |
+
|
1130 |
+
# == Main run and monitor loop ==
|
1131 |
+
logger.info("Starting training...")
|
1132 |
+
yield "Starting...", zero_pd
|
1133 |
+
|
1134 |
+
lora_trainable_param, lora_all_param = calc_trainable_parameters(lora_model)
|
1135 |
+
|
1136 |
+
projections_string = ", ".join([projection.replace("_proj", "") for projection in model_to_lora_modules[model_id]])
|
1137 |
+
|
1138 |
+
print(f"Training '{model_id}' model using {YELLOW}({projections_string}){RESET} projections")
|
1139 |
+
|
1140 |
+
if lora_all_param > 0:
|
1141 |
+
print(f"Trainable params: {lora_trainable_param:,d} ({RED}{100 * lora_trainable_param / lora_all_param:.4f} %{RESET}), All params: {lora_all_param:,d} (Model: {model_all_params:,d})")
|
1142 |
+
|
1143 |
+
train_log.update({"base_model_name": shared.model_name})
|
1144 |
+
train_log.update({"base_model_class": shared.model.__class__.__name__})
|
1145 |
+
train_log.update({"base_loaded_in_4bit": getattr(lora_model, "is_loaded_in_4bit", False)})
|
1146 |
+
train_log.update({"base_loaded_in_8bit": getattr(lora_model, "is_loaded_in_8bit", False)})
|
1147 |
+
train_log.update({"projections": projections_string})
|
1148 |
+
if non_serialized_params['checkpoint_offset'] > 0:
|
1149 |
+
train_log.update({"last_run_steps_offset": non_serialized_params['checkpoint_offset']})
|
1150 |
+
train_log.update({"last_run_epoch_offset": non_serialized_params['epoch_offset']})
|
1151 |
+
|
1152 |
+
|
1153 |
+
if non_serialized_params['checkpoint_offset'] > 0:
|
1154 |
+
print(f"Continue training on {RED}previous adapter{RESET} from epoch: {RED}{non_serialized_params['epoch_offset']}{RESET}")
|
1155 |
+
|
1156 |
+
if stop_at_loss > 0:
|
1157 |
+
print(f"Monitoring loss {RED}(Auto-Stop at: {stop_at_loss}){RESET}")
|
1158 |
+
|
1159 |
+
|
1160 |
+
|
1161 |
+
if WANT_INTERRUPT:
|
1162 |
+
yield "Interrupted before start.", zero_pd
|
1163 |
+
return
|
1164 |
+
|
1165 |
+
def log_train_dataset(trainer):
|
1166 |
+
decoded_entries = []
|
1167 |
+
# Try to decode the entries and write the log file
|
1168 |
+
try:
|
1169 |
+
# Iterate over the first 10 elements in the dataset (or fewer if there are less than 10)
|
1170 |
+
for i in range(min(10, len(trainer.train_dataset))):
|
1171 |
+
decoded_text = shared.tokenizer.decode(trainer.train_dataset[i]['input_ids'])
|
1172 |
+
decoded_entries.append({"value": decoded_text})
|
1173 |
+
|
1174 |
+
# Write the log file
|
1175 |
+
Path('logs').mkdir(exist_ok=True)
|
1176 |
+
with open(Path('logs/train_dataset_sample.json'), 'w') as json_file:
|
1177 |
+
json.dump(decoded_entries, json_file, indent=4)
|
1178 |
+
|
1179 |
+
logger.info("Log file 'train_dataset_sample.json' created in the 'logs' directory.")
|
1180 |
+
except Exception as e:
|
1181 |
+
logger.error(f"Failed to create log file due to error: {e}")
|
1182 |
+
|
1183 |
+
def threaded_run():
|
1184 |
+
log_train_dataset(trainer)
|
1185 |
+
trainer.train()
|
1186 |
+
# Note: save in the thread in case the gradio thread breaks (eg browser closed)
|
1187 |
+
lora_model.save_pretrained(lora_file_path, safe_serialization = non_serialized_params['safe_serialization'])
|
1188 |
+
logger.info("LoRA training run is completed and saved.")
|
1189 |
+
# Save log
|
1190 |
+
with open(f"{lora_file_path}/training_log.json", 'w', encoding='utf-8') as file:
|
1191 |
+
json.dump(train_log, file, indent=2)
|
1192 |
+
|
1193 |
+
thread = threading.Thread(target=threaded_run)
|
1194 |
+
thread.start()
|
1195 |
+
last_step = 0
|
1196 |
+
start_time = time.perf_counter()
|
1197 |
+
|
1198 |
+
while thread.is_alive():
|
1199 |
+
time.sleep(0.5)
|
1200 |
+
|
1201 |
+
if statistics['loss']:
|
1202 |
+
max_value_dict = max(statistics['loss'], key=lambda x: x['value'])
|
1203 |
+
max_value = max_value_dict['value']+0.4
|
1204 |
+
first_epoch = statistics['loss'][0]['epoch']
|
1205 |
+
last_epoch = statistics['loss'][-1]['epoch']
|
1206 |
+
else:
|
1207 |
+
max_value = 3.5
|
1208 |
+
last_epoch = 0
|
1209 |
+
first_epoch = 0
|
1210 |
+
|
1211 |
+
if WANT_INTERRUPT:
|
1212 |
+
|
1213 |
+
losses = gr.LinePlot.update(
|
1214 |
+
value = pd.DataFrame(statistics['loss']),
|
1215 |
+
x="epoch", y="value",
|
1216 |
+
title="Loss Metrics",
|
1217 |
+
overlay_point=True, tooltip=["epoch", "value"],
|
1218 |
+
x_lim=[first_epoch,last_epoch], y_lim=[0,max_value],
|
1219 |
+
width=500, height=250 )
|
1220 |
+
|
1221 |
+
yield "Interrupting, please wait... *(Run will stop after the current training step completes.)*", losses
|
1222 |
+
|
1223 |
+
elif tracked.current_steps != last_step:
|
1224 |
+
last_step = tracked.current_steps
|
1225 |
+
time_elapsed = time.perf_counter() - start_time
|
1226 |
+
lastloss = float(train_log.get('loss', 0.0))
|
1227 |
+
|
1228 |
+
non_serialized_params.update({"training_loop": True})
|
1229 |
+
|
1230 |
+
if lastloss > 0:
|
1231 |
+
lastloss_str = f", ... Current Loss: `{lastloss:.2f}`"
|
1232 |
+
else:
|
1233 |
+
lastloss_str = ""
|
1234 |
+
|
1235 |
+
if time_elapsed <= 0:
|
1236 |
+
timer_info = ""
|
1237 |
+
total_time_estimate = 999
|
1238 |
+
else:
|
1239 |
+
its = tracked.current_steps / time_elapsed
|
1240 |
+
if its > 1:
|
1241 |
+
timer_info = f"`{its:.2f}` it/s"
|
1242 |
+
else:
|
1243 |
+
timer_info = f"`{1.0/its:.2f}` s/it"
|
1244 |
+
|
1245 |
+
total_time_estimate = (1.0 / its) * (tracked.max_steps)
|
1246 |
+
|
1247 |
+
if stop_at_loss != non_serialized_params['stop_at_loss']:
|
1248 |
+
stop_at_loss = non_serialized_params['stop_at_loss']
|
1249 |
+
print(f"Stop at loss changed {RED}(Auto-Stop at: {stop_at_loss}){RESET}")
|
1250 |
+
|
1251 |
+
losses = gr.LinePlot.update(
|
1252 |
+
value = pd.DataFrame(statistics['loss']),
|
1253 |
+
x="epoch", y="value",
|
1254 |
+
title="Loss Metrics",
|
1255 |
+
overlay_point=True, tooltip=["epoch", "value"],
|
1256 |
+
x_lim=[first_epoch,last_epoch], y_lim=[0,max_value],
|
1257 |
+
width=500, height=250 )
|
1258 |
+
|
1259 |
+
|
1260 |
+
yield f"Running... **{tracked.current_steps}** / **{tracked.max_steps}** ... {timer_info}, {format_time(time_elapsed)} / {format_time(total_time_estimate)} ... {format_time(total_time_estimate - time_elapsed)} remaining {lastloss_str}", losses
|
1261 |
+
|
1262 |
+
# Saving in the train thread might fail if an error occurs, so save here if so.
|
1263 |
+
|
1264 |
+
#return_pd = pd.DataFrame(statistics['loss'])
|
1265 |
+
|
1266 |
+
if statistics['loss']:
|
1267 |
+
max_value_dict = max(statistics['loss'], key=lambda x: x['value'])
|
1268 |
+
max_value = max_value_dict['value']+0.4
|
1269 |
+
first_epoch = statistics['loss'][0]['epoch']
|
1270 |
+
last_epoch = statistics['loss'][-1]['epoch']
|
1271 |
+
else:
|
1272 |
+
max_value = 3.5
|
1273 |
+
last_epoch = 0
|
1274 |
+
first_epoch = 0
|
1275 |
+
|
1276 |
+
return_pd = gr.LinePlot.update(
|
1277 |
+
value = pd.DataFrame(statistics['loss']),
|
1278 |
+
x="epoch", y="value",
|
1279 |
+
title="Loss Metrics",
|
1280 |
+
overlay_point=True, tooltip=["epoch", "value"],
|
1281 |
+
x_lim=[first_epoch,last_epoch], y_lim=[0,max_value],
|
1282 |
+
width=500, height=250)
|
1283 |
+
|
1284 |
+
non_serialized_params.update({"training_loop": False})
|
1285 |
+
|
1286 |
+
if not tracked.did_save:
|
1287 |
+
logger.info("Training complete, saving...")
|
1288 |
+
lora_model.save_pretrained(lora_file_path, safe_serialization = non_serialized_params['safe_serialization'])
|
1289 |
+
|
1290 |
+
if WANT_INTERRUPT:
|
1291 |
+
logger.info("Training interrupted.")
|
1292 |
+
yield f"Interrupted by user. LoRA saved to `{lora_file_path}`.", return_pd
|
1293 |
+
else:
|
1294 |
+
logger.info("Training complete!")
|
1295 |
+
yield f"Done! LoRA saved to `{lora_file_path}`.\n\nBefore testing your new LoRA, make sure to first reload the model, as it is currently dirty from training.", return_pd
|
1296 |
+
|
1297 |
+
create_graph(lora_file_path, lora_name)
|
1298 |
+
|
1299 |
+
def format_time(seconds: float):
    """Render a duration as a human-readable markdown string.

    Picks the largest unit (seconds, minutes, or hours) whose value is
    still below 120, and wraps the number in backticks for markdown.
    """
    # Under two minutes: report raw seconds.
    if seconds < 120:
        return f"`{seconds:.0f}` seconds"

    # Under two hours: report whole minutes.
    if (seconds / 60) < 120:
        return f"`{seconds / 60:.0f}` minutes"

    # Two hours or more: report whole hours.
    return f"`{seconds / 3600:.0f}` hours"
|
extensions/Training_PRO/train_utils.py
ADDED
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from modules import shared, utils
|
3 |
+
from pathlib import Path
|
4 |
+
import requests
|
5 |
+
import tqdm
|
6 |
+
import json
|
7 |
+
|
8 |
+
'''
|
9 |
+
def get_gpu_memory_usage(rank):
|
10 |
+
return {
|
11 |
+
'total': round(torch.cuda.get_device_properties(rank).total_memory / (1024**3), 2),
|
12 |
+
'max': round(torch.cuda.max_memory_allocated(rank) / (1024**3), 2),
|
13 |
+
'reserved': round(torch.cuda.memory_reserved(rank) / (1024**3), 2),
|
14 |
+
'allocated': round(torch.cuda.memory_allocated(rank) / (1024**3), 2)
|
15 |
+
}
|
16 |
+
'''
|
17 |
+
|
18 |
+
def list_subfoldersByTime(directory):
    """Return subfolder names of *directory*, newest first, prefixed with 'None'.

    The first element is always the literal string 'None' (sentinel for
    "no selection" in the UI dropdown). The remaining entries are the
    names of immediate subdirectories of *directory*, sorted by
    modification time with the most recently modified first. Backslashes
    are normalized to forward slashes so the prefix strip also works on
    Windows paths.
    """
    if not directory.endswith('/'):
        directory += '/'

    subfolders = ['None']  # sentinel first entry meaning "no selection"

    # Sort all directory entries (files included) by mtime, newest first;
    # non-directories are filtered out below.
    entries = [os.path.join(directory, name) for name in os.listdir(directory)]
    entries.sort(key=os.path.getmtime, reverse=True)

    for entry in entries:
        if os.path.isdir(entry):
            # Normalize separators, then strip the parent directory prefix
            # so only the subfolder's relative name remains.
            relative = str(entry).replace('\\', '/').replace(directory, '')
            subfolders.append(relative)

    return subfolders
|
38 |
+
|
39 |
+
def get_available_loras_local(_sortedByTime):
    """List available LoRA adapters.

    When *_sortedByTime* is truthy the LoRA directory is scanned and
    returned sorted by modification time (newest first, with a leading
    'None' entry); otherwise the web UI's default listing is used.
    """
    if _sortedByTime:
        return list_subfoldersByTime(shared.args.lora_dir)
    return utils.get_available_loras()
|
49 |
+
|
50 |
+
|
51 |
+
# FPHAM SPLIT BY SENTENCE BLOCK ===============
|
52 |
+
|
53 |
+
def split_sentences(text: str, cutoff_len: int):
    """Split *text* into sentence chunks, each tokenized and size-tagged.

    Returns a list of dicts ``{'text': str, 'size': int}`` where ``size``
    is the token count of ``text`` under ``shared.tokenizer``. A sentence
    is closed whenever the accumulated buffer ends with one of the
    delimiters below, unless the ending looks like an initial
    (e.g. "J. K.") or a known abbreviation. Sentences longer than
    ``cutoff_len - 1`` tokens are hard-trimmed and counted as errors.
    """
    sentences = []
    sentence = ''
    # Sentence-ending markers; '</s>' and '<//>' are the EOS / hard-cut
    # placeholder tokens used by the callers (precise_cut, sliding_block_cut).
    delimiters = ['. ', '? ', '! ', '... ', '.\n', '?\n', '!\n','...\n','</s>','<//>']
    # Common abbreviations that end with '. ' but do not end a sentence.
    abbreviations = ['Mr. ', 'Mrs. ', 'Dr. ', 'Ms. ', 'St. ', 'Prof. ', 'Jr. ', 'Ltd. ', 'Capt. ', 'Col. ', 'Gen. ', 'Ave. ', 'Blvd. ', 'Co. ', 'Corp. ', 'Dept. ', 'Est. ', 'Gov. ', 'Inc. ', 'Ph.D. ', 'Univ. ']
    errors = 0  # number of sentences that had to be trimmed to max_cut tokens
    max_cut = cutoff_len-1
    prev_char = ''  # character seen on the previous iteration (lags by one)

    for char in text:
        sentence += char

        # Close the sentence when it ends with a delimiter, but NOT when:
        #  - the previous char was uppercase and the char three back is not a
        #    space (heuristic for initials like "J. K." — avoid splitting), or
        #  - the buffer ends with a known abbreviation.
        if (any(sentence.endswith(delimiter) for delimiter in delimiters) and
            not (prev_char.isupper() and len(sentence) >= 3 and sentence[-3] != ' ') and
            not any(sentence.endswith(abbreviation) for abbreviation in abbreviations)):
            tokens = shared.tokenizer.encode(sentence)

            # A single sentence longer than the cutoff cannot fit any block:
            # trim it to max_cut tokens and count the loss.
            if len(tokens) > max_cut:
                tokens = tokens[:max_cut]
                sentence = shared.tokenizer.decode(tokens, skip_special_tokens=True)
                errors = errors + 1

            sentences.append({'text': sentence, 'size': len(tokens)})

            sentence = ''

        prev_char = char

    # Flush any trailing text that never hit a delimiter.
    if sentence:
        tokens = shared.tokenizer.encode(sentence)
        if len(tokens) > max_cut:
            tokens = tokens[:max_cut]
            sentence = shared.tokenizer.decode(tokens, skip_special_tokens=True)
            errors = errors + 1

        sentences.append({'text': sentence, 'size': len(tokens)})

    if errors > 0:
        print(f"Trimmed sentences beyond Cutoff Length: {errors}")

    return sentences
|
95 |
+
|
96 |
+
# The goal of following code is to create blocks of text + overlapping blocks while:
|
97 |
+
# respects sentence boundaries
|
98 |
+
# always uses all the text
|
99 |
+
# hard cut defined by hard_cut_string or </s> will always end at the end of data block
|
100 |
+
# no overlapping blocks will be created across hard cut or across </s> token
|
101 |
+
|
102 |
+
def precise_cut(text: str, overlap: bool, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):
    """Slice raw *text* into training blocks that respect sentence boundaries.

    Blocks never exceed ``cutoff_len - 1`` tokens and never continue past a
    hard cut (``hard_cut_string``, internally mapped to the ``<//>``
    placeholder). When *overlap* is True, additional half-offset blocks are
    generated, but never across a hard cut. Hard-cut placeholders are finally
    rewritten to ``</s>`` (if *eos_to_hc*) or removed. Returns a list of
    stripped text blocks, each longer than *min_chars_cut* characters.
    """
    EOSX_str = '<//>' #hardcut placeholder
    EOS_str = '</s>'
    print("Precise raw text slicer: ON")

    # The UI passes the hard-cut string with escaped newlines; un-escape,
    # then replace every occurrence with the placeholder token.
    cut_string = hard_cut_string.replace('\\n', '\n')
    text = text.replace(cut_string, EOSX_str)
    sentences = split_sentences(text, cutoff_len)

    print(f"Sentences: {len(sentences)}")
    sentencelist = []
    currentSentence = ''
    totalLength = 0
    max_cut = cutoff_len-1
    half_cut = cutoff_len//2
    halfcut_length = 0

    # edgeindex collects sentence indices roughly half a block apart;
    # these are the start points for the overlapping pass below.
    edgeindex = []
    half_index = 0

    for index, item in enumerate(sentences):

        # Track half-block boundaries for the overlap pass.
        if halfcut_length+ item['size'] < half_cut:
            halfcut_length += item['size']
            half_index = index
        else:
            edgeindex.append(half_index)
            # Large negative reset so the next edge is recorded roughly one
            # full block later — TODO confirm intent of the -2*max_cut value.
            halfcut_length = -2 * max_cut

        # Grow the current block while it fits and has not hit a hard cut.
        if totalLength + item['size'] < max_cut and not currentSentence.endswith(EOSX_str):
            currentSentence += item['text']
            totalLength += item['size']
        else:

            # Flush the finished block (if long enough) and start a new one.
            if len(currentSentence.strip()) > min_chars_cut:
                sentencelist.append(currentSentence.strip())

            currentSentence = item['text']
            totalLength = item['size']
            halfcut_length = item['size']

    # Flush the final partial block.
    if len(currentSentence.strip()) > min_chars_cut:
        sentencelist.append(currentSentence.strip())

    unique_blocks = len(sentencelist)
    print(f"Text Blocks: {unique_blocks}")

    #overlap strategies:
    # don't overlap across HARD CUT (EOSX)
    if overlap:
        for edge_idx in edgeindex:
            currentSentence = ''
            totalLength = 0

            for item in sentences[edge_idx:]:
                if totalLength + item['size'] < max_cut:
                    currentSentence += item['text']
                    totalLength += item['size']
                else:
                    #if by chance EOSX is at the end then it's acceptable
                    if currentSentence.endswith(EOSX_str) and len(currentSentence.strip()) > min_chars_cut:
                        sentencelist.append(currentSentence.strip())
                    # otherwise don't cross hard cut
                    elif EOSX_str not in currentSentence and len(currentSentence.strip()) > min_chars_cut:
                        sentencelist.append(currentSentence.strip())

                    currentSentence = ''
                    totalLength = 0
                    break

        print(f"+ Overlapping blocks: {len(sentencelist)-unique_blocks}")

    # Rewrite hard-cut placeholders and count resulting EOS tokens.
    num_EOS = 0
    for i in range(len(sentencelist)):
        if eos_to_hc:
            sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)
        else:
            sentencelist[i] = sentencelist[i].replace(EOSX_str, '')

        #someone may have had stop strings in the raw text...
        sentencelist[i] = sentencelist[i].replace("</s></s>", EOS_str)
        num_EOS += sentencelist[i].count(EOS_str)

    if num_EOS > 0:
        print(f"+ EOS count: {num_EOS}")

    #final check for useless lines
    sentencelist = [item for item in sentencelist if item.strip() != "</s>"]
    sentencelist = [item for item in sentencelist if item.strip() != ""]


    if debug_slicer:
        # Write the log file
        Path('logs').mkdir(exist_ok=True)
        sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
        output_file = "logs/sentencelist.json"
        with open(output_file, 'w') as f:
            json.dump(sentencelist_dict, f,indent=2)

        print("Saved sentencelist.json in logs folder")

    return sentencelist
|
206 |
+
|
207 |
+
|
208 |
+
def sliding_block_cut(text: str, min_chars_cut: int, eos_to_hc: bool, cutoff_len: int, hard_cut_string: str, debug_slicer:bool):
    """Slice raw *text* into maximally overlapping ("mega block") chunks.

    For every sentence index a candidate block is grown forward until it
    would exceed ``cutoff_len - 1`` tokens or crosses a hard cut
    (``hard_cut_string``, mapped to the ``<//>`` placeholder). Consecutive
    blocks ending on the same sentence are deduplicated. Hard-cut
    placeholders are rewritten to ``</s>`` (if *eos_to_hc*) or removed.
    Returns a list of stripped blocks longer than *min_chars_cut* chars.
    """
    EOSX_str = '<//>' #hardcut placeholder
    EOS_str = '</s>'
    print("Mega Block Overlap: ON")

    # Un-escape the hard-cut string from the UI and mark cuts with the
    # placeholder before sentence splitting.
    cut_string = hard_cut_string.replace('\\n', '\n')
    text = text.replace(cut_string, EOSX_str)
    sentences = split_sentences(text, cutoff_len)

    print(f"Sentences: {len(sentences)}")
    sentencelist = []

    max_cut = cutoff_len-1

    #print(f"max_cut: {max_cut}")
    # When a block ends on a hard cut, skip start indices up to that cut so
    # no overlapping block spans across it.
    advancing_to = 0

    # Last sentence of the previously emitted block; used to skip blocks
    # that would end on the same sentence (near-duplicates).
    prev_block_lastsentence = ""


    for i in range(len(sentences)):
        totalLength = 0
        currentSentence = ''
        lastsentence = ""

        if i >= advancing_to:
            # Grow a block starting at sentence i.
            for k in range(i, len(sentences)):

                current_length = sentences[k]['size']

                if totalLength + current_length <= max_cut and not currentSentence.endswith(EOSX_str):
                    currentSentence += sentences[k]['text']
                    totalLength += current_length
                    lastsentence = sentences[k]['text']
                else:
                    if len(currentSentence.strip()) > min_chars_cut:
                        # Only emit if this block ends on a different sentence
                        # than the previous one (dedupe near-identical blocks).
                        if prev_block_lastsentence!=lastsentence:
                            sentencelist.append(currentSentence.strip())
                            prev_block_lastsentence = lastsentence

                    advancing_to = 0
                    if currentSentence.endswith(EOSX_str):
                        advancing_to = k

                    currentSentence = ""
                    totalLength = 0
                    break

    # Flush any leftover block from the final iteration.
    if currentSentence != "":
        if len(currentSentence.strip()) > min_chars_cut:
            sentencelist.append(currentSentence.strip())

    unique_blocks = len(sentencelist)
    print(f"Text Blocks: {unique_blocks}")
    # Rewrite hard-cut placeholders and count resulting EOS tokens.
    num_EOS = 0
    for i in range(len(sentencelist)):
        if eos_to_hc:
            sentencelist[i] = sentencelist[i].replace(EOSX_str, EOS_str)
        else:
            sentencelist[i] = sentencelist[i].replace(EOSX_str, '')

        #someone may have had stop strings in the raw text...
        sentencelist[i] = sentencelist[i].replace("</s></s>", EOS_str)
        num_EOS += sentencelist[i].count(EOS_str)

    if num_EOS > 0:
        print(f"+ EOS count: {num_EOS}")

    #final check for useless lines
    sentencelist = [item for item in sentencelist if item.strip() != "</s>"]
    sentencelist = [item for item in sentencelist if item.strip() != ""]


    if debug_slicer:
        # Write the log file
        Path('logs').mkdir(exist_ok=True)
        sentencelist_dict = {index: sentence for index, sentence in enumerate(sentencelist)}
        output_file = "logs/sentencelist.json"
        with open(output_file, 'w') as f:
            json.dump(sentencelist_dict, f,indent=2)

        print("Saved sentencelist.json in logs folder")

    return sentencelist
|
293 |
+
|
294 |
+
# Example usage:
|
295 |
+
# download_file_from_url('https://example.com/path/to/your/file.ext', '/output/directory')
|
296 |
+
|
297 |
+
def download_file_from_url(url, overwrite, output_dir_in, valid_extensions = {'.txt', '.json'}):
    """Download *url* into *output_dir_in*, yielding progress strings.

    Generator: yields human-readable status/progress messages for the UI
    as the download proceeds. Only files whose (lower-cased) extension is
    in *valid_extensions* are accepted. An existing file is kept unless
    *overwrite* is truthy. All exceptions are reported as a yielded
    message rather than raised.
    """
    session = None  # created inside try; guarded in finally
    try:
        session = requests.Session()
        headers = {}
        mode = 'wb'
        # Derive the target filename from the last URL path segment.
        filename = url.split('/')[-1]

        output_dir = str(output_dir_in)
        # Construct the full path to the output file.
        local_filename = os.path.join(output_dir, filename)

        # Refuse to clobber an existing file unless overwrite was requested.
        overw = ''
        if os.path.exists(local_filename):
            if not overwrite:
                yield f"File '{local_filename}' already exists. Aborting."
                return
            overw = ' [Overwrite existing]'

        # Only allow whitelisted extensions (case-insensitive check).
        file_extension = os.path.splitext(filename.lower())[-1]
        if file_extension not in valid_extensions:
            yield f"Invalid file extension: {file_extension}. Only {valid_extensions} files are supported."
            return

        # Stream the download in chunks so progress can be reported.
        with session.get(url, stream=True, headers=headers, timeout=10) as r:
            r.raise_for_status()
            # NOTE: Content-Length can be wildly inaccurate, so progress is
            # reported as a running byte count rather than a percentage.
            block_size = 1024 * 4
            with open(local_filename, mode) as f:
                count = 0
                for data in r.iter_content(block_size):
                    f.write(data)
                    count += len(data)

                    yield f"Downloaded: {count} " + overw

        # Sanity-check the result on disk.
        if os.path.exists(local_filename):
            downloaded_size = os.path.getsize(local_filename)
            if downloaded_size > 0:
                # BUGFIX: report the actual filename (was a broken placeholder).
                yield f"File '{filename}' downloaded to '{output_dir}' ({downloaded_size} bytes)."
                print("File Downloaded")
            else:
                print("Downloaded file is zero")
                # BUGFIX: removed stray ')' from the message.
                yield "Failed. Downloaded file size is zero."
        else:
            print(f"Error: {local_filename} failed to download.")
            yield f"Error: {local_filename} failed to download"

    except Exception as e:
        print(f"An error occurred: {e}")
        yield f"An error occurred: {e}"

    finally:
        # Close the session to release resources. Guarded so a failure in
        # requests.Session() itself cannot raise NameError here.
        if session is not None:
            session.close()
|
368 |
+
|
extensions/character_bias/script.py
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

import gradio as gr

# get the current directory of the script
current_dir = os.path.dirname(os.path.abspath(__file__))

# check if the bias_options.txt file exists, if not, create it
# (seeds the dropdown with a default set of emotional bias strings)
bias_file = os.path.join(current_dir, "bias_options.txt")
if not os.path.isfile(bias_file):
    with open(bias_file, "w") as f:
        f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*")

# read bias options from the text file (one option per line)
with open(bias_file, "r") as f:
    bias_options = [line.strip() for line in f.readlines()]

# Extension state shared with the web UI:
#   "activate"      - whether the bias is applied at all
#   "bias string"   - the dropdown-selected bias text
#   "custom string" - free-form override; used instead of "bias string" when non-empty
params = {
    "activate": True,
    "bias string": " *I am so happy*",
    "custom string": "",
}
|
23 |
+
|
24 |
+
|
25 |
+
def input_modifier(string):
    """Hook applied to user text before it reaches the model.

    This extension does not alter inputs, so the text is returned as-is.
    """
    return string
|
31 |
+
|
32 |
+
|
33 |
+
def output_modifier(string):
    """Hook applied to model outputs.

    This extension does not alter outputs, so the text is returned as-is.
    """
    return string
|
38 |
+
|
39 |
+
|
40 |
+
def bot_prefix_modifier(string):
    """Append the configured bias text to the bot's reply prefix.

    Only applied in chat mode. When the extension is active, the custom
    string (if non-empty after stripping) or the selected bias string is
    appended to *string* to bias the bot's behavior; otherwise the prefix
    is returned untouched.
    """
    if not params['activate']:
        return string

    bias = params['custom string'].strip() or params['bias string'].strip()
    return f'{string} {bias} '
|
53 |
+
|
54 |
+
|
55 |
+
def ui():
    """Build the extension's Gradio settings panel and wire change events."""
    # Gradio elements
    activate = gr.Checkbox(value=params['activate'], label='Activate character bias')
    dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file')
    custom_string = gr.Textbox(value=params['custom string'], placeholder="Enter custom bias string", label="Custom Character Bias", info='If not empty, will be used instead of the value above')

    # Event functions to update the parameters in the backend
    def update_bias_string(x):
        # BUGFIX: gr.Dropdown has no .get() method — the original fallback
        # (dropdown_string.get()) raised AttributeError whenever the event
        # delivered a falsy value. Keep the previously stored string instead.
        if x:
            params.update({"bias string": x})
        return x

    def update_custom_string(x):
        params.update({"custom string": x})

    dropdown_string.change(update_bias_string, dropdown_string, None)
    custom_string.change(update_custom_string, custom_string, None)
    activate.change(lambda x: params.update({"activate": x}), activate, None)
|
extensions/coqui_tts/harvard_sentences.txt
ADDED
@@ -0,0 +1,720 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The birch canoe slid on the smooth planks.
|
2 |
+
Glue the sheet to the dark blue background.
|
3 |
+
It's easy to tell the depth of a well.
|
4 |
+
These days a chicken leg is a rare dish.
|
5 |
+
Rice is often served in round bowls.
|
6 |
+
The juice of lemons makes fine punch.
|
7 |
+
The box was thrown beside the parked truck.
|
8 |
+
The hogs were fed chopped corn and garbage.
|
9 |
+
Four hours of steady work faced us.
|
10 |
+
A large size in stockings is hard to sell.
|
11 |
+
The boy was there when the sun rose.
|
12 |
+
A rod is used to catch pink salmon.
|
13 |
+
The source of the huge river is the clear spring.
|
14 |
+
Kick the ball straight and follow through.
|
15 |
+
Help the woman get back to her feet.
|
16 |
+
A pot of tea helps to pass the evening.
|
17 |
+
Smoky fires lack flame and heat.
|
18 |
+
The soft cushion broke the man's fall.
|
19 |
+
The salt breeze came across from the sea.
|
20 |
+
The girl at the booth sold fifty bonds.
|
21 |
+
The small pup gnawed a hole in the sock.
|
22 |
+
The fish twisted and turned on the bent hook.
|
23 |
+
Press the pants and sew a button on the vest.
|
24 |
+
The swan dive was far short of perfect.
|
25 |
+
The beauty of the view stunned the young boy.
|
26 |
+
Two blue fish swam in the tank.
|
27 |
+
Her purse was full of useless trash.
|
28 |
+
The colt reared and threw the tall rider.
|
29 |
+
It snowed, rained, and hailed the same morning.
|
30 |
+
Read verse out loud for pleasure.
|
31 |
+
Hoist the load to your left shoulder.
|
32 |
+
Take the winding path to reach the lake.
|
33 |
+
Note closely the size of the gas tank.
|
34 |
+
Wipe the grease off his dirty face.
|
35 |
+
Mend the coat before you go out.
|
36 |
+
The wrist was badly strained and hung limp.
|
37 |
+
The stray cat gave birth to kittens.
|
38 |
+
The young girl gave no clear response.
|
39 |
+
The meal was cooked before the bell rang.
|
40 |
+
What joy there is in living.
|
41 |
+
A king ruled the state in the early days.
|
42 |
+
The ship was torn apart on the sharp reef.
|
43 |
+
Sickness kept him home the third week.
|
44 |
+
The wide road shimmered in the hot sun.
|
45 |
+
The lazy cow lay in the cool grass.
|
46 |
+
Lift the square stone over the fence.
|
47 |
+
The rope will bind the seven books at once.
|
48 |
+
Hop over the fence and plunge in.
|
49 |
+
The friendly gang left the drug store.
|
50 |
+
Mesh wire keeps chicks inside.
|
51 |
+
The frosty air passed through the coat.
|
52 |
+
The crooked maze failed to fool the mouse.
|
53 |
+
Adding fast leads to wrong sums.
|
54 |
+
The show was a flop from the very start.
|
55 |
+
A saw is a tool used for making boards.
|
56 |
+
The wagon moved on well oiled wheels.
|
57 |
+
March the soldiers past the next hill.
|
58 |
+
A cup of sugar makes sweet fudge.
|
59 |
+
Place a rosebush near the porch steps.
|
60 |
+
Both lost their lives in the raging storm.
|
61 |
+
We talked of the side show in the circus.
|
62 |
+
Use a pencil to write the first draft.
|
63 |
+
He ran half way to the hardware store.
|
64 |
+
The clock struck to mark the third period.
|
65 |
+
A small creek cut across the field.
|
66 |
+
Cars and busses stalled in snow drifts.
|
67 |
+
The set of china hit the floor with a crash.
|
68 |
+
This is a grand season for hikes on the road.
|
69 |
+
The dune rose from the edge of the water.
|
70 |
+
Those words were the cue for the actor to leave.
|
71 |
+
A yacht slid around the point into the bay.
|
72 |
+
The two met while playing on the sand.
|
73 |
+
The ink stain dried on the finished page.
|
74 |
+
The walled town was seized without a fight.
|
75 |
+
The lease ran out in sixteen weeks.
|
76 |
+
A tame squirrel makes a nice pet.
|
77 |
+
The horn of the car woke the sleeping cop.
|
78 |
+
The heart beat strongly and with firm strokes.
|
79 |
+
The pearl was worn in a thin silver ring.
|
80 |
+
The fruit peel was cut in thick slices.
|
81 |
+
The Navy attacked the big task force.
|
82 |
+
See the cat glaring at the scared mouse.
|
83 |
+
There are more than two factors here.
|
84 |
+
The hat brim was wide and too droopy.
|
85 |
+
The lawyer tried to lose his case.
|
86 |
+
The grass curled around the fence post.
|
87 |
+
Cut the pie into large parts.
|
88 |
+
Men strive but seldom get rich.
|
89 |
+
Always close the barn door tight.
|
90 |
+
He lay prone and hardly moved a limb.
|
91 |
+
The slush lay deep along the street.
|
92 |
+
A wisp of cloud hung in the blue air.
|
93 |
+
A pound of sugar costs more than eggs.
|
94 |
+
The fin was sharp and cut the clear water.
|
95 |
+
The play seems dull and quite stupid.
|
96 |
+
Bail the boat to stop it from sinking.
|
97 |
+
The term ended in late June that year.
|
98 |
+
A tusk is used to make costly gifts.
|
99 |
+
Ten pins were set in order.
|
100 |
+
The bill was paid every third week.
|
101 |
+
Oak is strong and also gives shade.
|
102 |
+
Cats and dogs each hate the other.
|
103 |
+
The pipe began to rust while new.
|
104 |
+
Open the crate but don't break the glass.
|
105 |
+
Add the sum to the product of these three.
|
106 |
+
Thieves who rob friends deserve jail.
|
107 |
+
The ripe taste of cheese improves with age.
|
108 |
+
Act on these orders with great speed.
|
109 |
+
The hog crawled under the high fence.
|
110 |
+
Move the vat over the hot fire.
|
111 |
+
The bark of the pine tree was shiny and dark.
|
112 |
+
Leaves turn brown and yellow in the fall.
|
113 |
+
The pennant waved when the wind blew.
|
114 |
+
Split the log with a quick, sharp blow.
|
115 |
+
Burn peat after the logs give out.
|
116 |
+
He ordered peach pie with ice cream.
|
117 |
+
Weave the carpet on the right hand side.
|
118 |
+
Hemp is a weed found in parts of the tropics.
|
119 |
+
A lame back kept his score low.
|
120 |
+
We find joy in the simplest things.
|
121 |
+
Type out three lists of orders.
|
122 |
+
The harder he tried the less he got done.
|
123 |
+
The boss ran the show with a watchful eye.
|
124 |
+
The cup cracked and spilled its contents.
|
125 |
+
Paste can cleanse the most dirty brass.
|
126 |
+
The slang word for raw whiskey is booze.
|
127 |
+
It caught its hind paw in a rusty trap.
|
128 |
+
The wharf could be seen at the farther shore.
|
129 |
+
Feel the heat of the weak dying flame.
|
130 |
+
The tiny girl took off her hat.
|
131 |
+
A cramp is no small danger on a swim.
|
132 |
+
He said the same phrase thirty times.
|
133 |
+
Pluck the bright rose without leaves.
|
134 |
+
Two plus seven is less than ten.
|
135 |
+
The glow deepened in the eyes of the sweet girl.
|
136 |
+
Bring your problems to the wise chief.
|
137 |
+
Write a fond note to the friend you cherish.
|
138 |
+
Clothes and lodging are free to new men.
|
139 |
+
We frown when events take a bad turn.
|
140 |
+
Port is a strong wine with a smoky taste.
|
141 |
+
The young kid jumped the rusty gate.
|
142 |
+
Guess the results from the first scores.
|
143 |
+
A salt pickle tastes fine with ham.
|
144 |
+
The just claim got the right verdict.
|
145 |
+
These thistles bend in a high wind.
|
146 |
+
Pure bred poodles have curls.
|
147 |
+
The tree top waved in a graceful way.
|
148 |
+
The spot on the blotter was made by green ink.
|
149 |
+
Mud was spattered on the front of his white shirt.
|
150 |
+
The cigar burned a hole in the desk top.
|
151 |
+
The empty flask stood on the tin tray.
|
152 |
+
A speedy man can beat this track mark.
|
153 |
+
He broke a new shoelace that day.
|
154 |
+
The coffee stand is too high for the couch.
|
155 |
+
The urge to write short stories is rare.
|
156 |
+
The pencils have all been used.
|
157 |
+
The pirates seized the crew of the lost ship.
|
158 |
+
We tried to replace the coin but failed.
|
159 |
+
She sewed the torn coat quite neatly.
|
160 |
+
The sofa cushion is red and of light weight.
|
161 |
+
The jacket hung on the back of the wide chair.
|
162 |
+
At that high level the air is pure.
|
163 |
+
Drop the two when you add the figures.
|
164 |
+
A filing case is now hard to buy.
|
165 |
+
An abrupt start does not win the prize.
|
166 |
+
Wood is best for making toys and blocks.
|
167 |
+
The office paint was a dull, sad tan.
|
168 |
+
He knew the skill of the great young actress.
|
169 |
+
A rag will soak up spilled water.
|
170 |
+
A shower of dirt fell from the hot pipes.
|
171 |
+
Steam hissed from the broken valve.
|
172 |
+
The child almost hurt the small dog.
|
173 |
+
There was a sound of dry leaves outside.
|
174 |
+
The sky that morning was clear and bright blue.
|
175 |
+
Torn scraps littered the stone floor.
|
176 |
+
Sunday is the best part of the week.
|
177 |
+
The doctor cured him with these pills.
|
178 |
+
The new girl was fired today at noon.
|
179 |
+
They felt gay when the ship arrived in port.
|
180 |
+
Add the store's account to the last cent.
|
181 |
+
Acid burns holes in wool cloth.
|
182 |
+
Fairy tales should be fun to write.
|
183 |
+
Eight miles of woodland burned to waste.
|
184 |
+
The third act was dull and tired the players.
|
185 |
+
A young child should not suffer fright.
|
186 |
+
Add the column and put the sum here.
|
187 |
+
We admire and love a good cook.
|
188 |
+
There the flood mark is ten inches.
|
189 |
+
He carved a head from the round block of marble.
|
190 |
+
She has a smart way of wearing clothes.
|
191 |
+
The fruit of a fig tree is apple-shaped.
|
192 |
+
Corn cobs can be used to kindle a fire.
|
193 |
+
Where were they when the noise started?
|
194 |
+
The paper box is full of thumb tacks.
|
195 |
+
Sell your gift to a buyer at a good gain.
|
196 |
+
The tongs lay beside the ice pail.
|
197 |
+
The petals fall with the next puff of wind.
|
198 |
+
Bring your best compass to the third class.
|
199 |
+
They could laugh although they were sad.
|
200 |
+
Farmers came in to thresh the oat crop.
|
201 |
+
The brown house was on fire to the attic.
|
202 |
+
The lure is used to catch trout and flounder.
|
203 |
+
Float the soap on top of the bath water.
|
204 |
+
A blue crane is a tall wading bird.
|
205 |
+
A fresh start will work such wonders.
|
206 |
+
The club rented the rink for the fifth night.
|
207 |
+
After the dance, they went straight home.
|
208 |
+
The hostess taught the new maid to serve.
|
209 |
+
He wrote his last novel there at the inn.
|
210 |
+
Even the worst will beat his low score.
|
211 |
+
The cement had dried when he moved it.
|
212 |
+
The loss of the second ship was hard to take.
|
213 |
+
The fly made its way along the wall.
|
214 |
+
Do that with a wooden stick.
|
215 |
+
Live wires should be kept covered.
|
216 |
+
The large house had hot water taps.
|
217 |
+
It is hard to erase blue or red ink.
|
218 |
+
Write at once or you may forget it.
|
219 |
+
The doorknob was made of bright clean brass.
|
220 |
+
The wreck occurred by the bank on Main Street.
|
221 |
+
A pencil with black lead writes best.
|
222 |
+
Coax a young calf to drink from a bucket.
|
223 |
+
Schools for ladies teach charm and grace.
|
224 |
+
The lamp shone with a steady green flame.
|
225 |
+
They took the axe and the saw to the forest.
|
226 |
+
The ancient coin was quite dull and worn.
|
227 |
+
The shaky barn fell with a loud crash.
|
228 |
+
Jazz and swing fans like fast music.
|
229 |
+
Rake the rubbish up and then burn it.
|
230 |
+
Slash the gold cloth into fine ribbons.
|
231 |
+
Try to have the court decide the case.
|
232 |
+
They are pushed back each time they attack.
|
233 |
+
He broke his ties with groups of former friends.
|
234 |
+
They floated on the raft to sun their white backs.
|
235 |
+
The map had an X that meant nothing.
|
236 |
+
Whitings are small fish caught in nets.
|
237 |
+
Some ads serve to cheat buyers.
|
238 |
+
Jerk the rope and the bell rings weakly.
|
239 |
+
A waxed floor makes us lose balance.
|
240 |
+
Madam, this is the best brand of corn.
|
241 |
+
On the islands the sea breeze is soft and mild.
|
242 |
+
The play began as soon as we sat down.
|
243 |
+
This will lead the world to more sound and fury.
|
244 |
+
Add salt before you fry the egg.
|
245 |
+
The rush for funds reached its peak Tuesday.
|
246 |
+
The birch looked stark white and lonesome.
|
247 |
+
The box is held by a bright red snapper.
|
248 |
+
To make pure ice, you freeze water.
|
249 |
+
The first worm gets snapped early.
|
250 |
+
Jump the fence and hurry up the bank.
|
251 |
+
Yell and clap as the curtain slides back.
|
252 |
+
They are men who walk the middle of the road.
|
253 |
+
Both brothers wear the same size.
|
254 |
+
In some form or other we need fun.
|
255 |
+
The prince ordered his head chopped off.
|
256 |
+
The houses are built of red clay bricks.
|
257 |
+
Ducks fly north but lack a compass.
|
258 |
+
Fruit flavors are used in fizz drinks.
|
259 |
+
These pills do less good than others.
|
260 |
+
Canned pears lack full flavor.
|
261 |
+
The dark pot hung in the front closet.
|
262 |
+
Carry the pail to the wall and spill it there.
|
263 |
+
The train brought our hero to the big town.
|
264 |
+
We are sure that one war is enough.
|
265 |
+
Gray paint stretched for miles around.
|
266 |
+
The rude laugh filled the empty room.
|
267 |
+
High seats are best for football fans.
|
268 |
+
Tea served from the brown jug is tasty.
|
269 |
+
A dash of pepper spoils beef stew.
|
270 |
+
A zestful food is the hot-cross bun.
|
271 |
+
The horse trotted around the field at a brisk pace.
|
272 |
+
Find the twin who stole the pearl necklace.
|
273 |
+
Cut the cord that binds the box tightly.
|
274 |
+
The red tape bound the smuggled food.
|
275 |
+
Look in the corner to find the tan shirt.
|
276 |
+
The cold drizzle will halt the bond drive.
|
277 |
+
Nine men were hired to dig the ruins.
|
278 |
+
The junk yard had a mouldy smell.
|
279 |
+
The flint sputtered and lit a pine torch.
|
280 |
+
Soak the cloth and drown the sharp odor.
|
281 |
+
The shelves were bare of both jam or crackers.
|
282 |
+
A joy to every child is the swan boat.
|
283 |
+
All sat frozen and watched the screen.
|
284 |
+
A cloud of dust stung his tender eyes.
|
285 |
+
To reach the end he needs much courage.
|
286 |
+
Shape the clay gently into block form.
|
287 |
+
A ridge on a smooth surface is a bump or flaw.
|
288 |
+
Hedge apples may stain your hands green.
|
289 |
+
Quench your thirst, then eat the crackers.
|
290 |
+
Tight curls get limp on rainy days.
|
291 |
+
The mute muffled the high tones of the horn.
|
292 |
+
The gold ring fits only a pierced ear.
|
293 |
+
The old pan was covered with hard fudge.
|
294 |
+
Watch the log float in the wide river.
|
295 |
+
The node on the stalk of wheat grew daily.
|
296 |
+
The heap of fallen leaves was set on fire.
|
297 |
+
Write fast if you want to finish early.
|
298 |
+
His shirt was clean but one button was gone.
|
299 |
+
The barrel of beer was a brew of malt and hops.
|
300 |
+
Tin cans are absent from store shelves.
|
301 |
+
Slide the box into that empty space.
|
302 |
+
The plant grew large and green in the window.
|
303 |
+
The beam dropped down on the workmen's head.
|
304 |
+
Pink clouds floated with the breeze.
|
305 |
+
She danced like a swan, tall and graceful.
|
306 |
+
The tube was blown and the tire flat and useless.
|
307 |
+
It is late morning on the old wall clock.
|
308 |
+
Let's all join as we sing the last chorus.
|
309 |
+
The last switch cannot be turned off.
|
310 |
+
The fight will end in just six minutes.
|
311 |
+
The store walls were lined with colored frocks.
|
312 |
+
The peace league met to discuss their plans.
|
313 |
+
The rise to fame of a person takes luck.
|
314 |
+
Paper is scarce, so write with much care.
|
315 |
+
The quick fox jumped on the sleeping cat.
|
316 |
+
The nozzle of the fire hose was bright brass.
|
317 |
+
Screw the round cap on as tight as needed.
|
318 |
+
Time brings us many changes.
|
319 |
+
The purple tie was ten years old.
|
320 |
+
Men think and plan and sometimes act.
|
321 |
+
Fill the ink jar with sticky glue.
|
322 |
+
He smoked a big pipe with strong contents.
|
323 |
+
We need grain to keep our mules healthy.
|
324 |
+
Pack the records in a neat thin case.
|
325 |
+
The crunch of feet in the snow was the only sound.
|
326 |
+
The copper bowl shone in the sun's rays.
|
327 |
+
Boards will warp unless kept dry.
|
328 |
+
The plush chair leaned against the wall.
|
329 |
+
Glass will clink when struck by metal.
|
330 |
+
Bathe and relax in the cool green grass.
|
331 |
+
Nine rows of soldiers stood in line.
|
332 |
+
The beach is dry and shallow at low tide.
|
333 |
+
The idea is to sew both edges straight.
|
334 |
+
The kitten chased the dog down the street.
|
335 |
+
Pages bound in cloth make a book.
|
336 |
+
Try to trace the fine lines of the painting.
|
337 |
+
Women form less than half of the group.
|
338 |
+
The zones merge in the central part of town.
|
339 |
+
A gem in the rough needs work to polish.
|
340 |
+
Code is used when secrets are sent.
|
341 |
+
Most of the news is easy for us to hear.
|
342 |
+
He used the lathe to make brass objects.
|
343 |
+
The vane on top of the pole revolved in the wind.
|
344 |
+
Mince pie is a dish served to children.
|
345 |
+
The clan gathered on each dull night.
|
346 |
+
Let it burn, it gives us warmth and comfort.
|
347 |
+
A castle built from sand fails to endure.
|
348 |
+
A child's wit saved the day for us.
|
349 |
+
Tack the strip of carpet to the worn floor.
|
350 |
+
Next Tuesday we must vote.
|
351 |
+
Pour the stew from the pot into the plate.
|
352 |
+
Each penny shone like new.
|
353 |
+
The man went to the woods to gather sticks.
|
354 |
+
The dirt piles were lined along the road.
|
355 |
+
The logs fell and tumbled into the clear stream.
|
356 |
+
Just hoist it up and take it away.
|
357 |
+
A ripe plum is fit for a king's palate.
|
358 |
+
Our plans right now are hazy.
|
359 |
+
Brass rings are sold by these natives.
|
360 |
+
It takes a good trap to capture a bear.
|
361 |
+
Feed the white mouse some flower seeds.
|
362 |
+
The thaw came early and freed the stream.
|
363 |
+
He took the lead and kept it the whole distance.
|
364 |
+
The key you designed will fit the lock.
|
365 |
+
Plead to the council to free the poor thief.
|
366 |
+
Better hash is made of rare beef.
|
367 |
+
This plank was made for walking on.
|
368 |
+
The lake sparkled in the red hot sun.
|
369 |
+
He crawled with care along the ledge.
|
370 |
+
Tend the sheep while the dog wanders.
|
371 |
+
It takes a lot of help to finish these.
|
372 |
+
Mark the spot with a sign painted red.
|
373 |
+
Take two shares as a fair profit.
|
374 |
+
The fur of cats goes by many names.
|
375 |
+
North winds bring colds and fevers.
|
376 |
+
He asks no person to vouch for him.
|
377 |
+
Go now and come here later.
|
378 |
+
A sash of gold silk will trim her dress.
|
379 |
+
Soap can wash most dirt away.
|
380 |
+
That move means the game is over.
|
381 |
+
He wrote down a long list of items.
|
382 |
+
A siege will crack the strong defense.
|
383 |
+
Grape juice and water mix well.
|
384 |
+
Roads are paved with sticky tar.
|
385 |
+
Fake stones shine but cost little.
|
386 |
+
The drip of the rain made a pleasant sound.
|
387 |
+
Smoke poured out of every crack.
|
388 |
+
Serve the hot rum to the tired heroes.
|
389 |
+
Much of the story makes good sense.
|
390 |
+
The sun came up to light the eastern sky.
|
391 |
+
Heave the line over the port side.
|
392 |
+
A lathe cuts and trims any wood.
|
393 |
+
It's a dense crowd in two distinct ways.
|
394 |
+
His hip struck the knee of the next player.
|
395 |
+
The stale smell of old beer lingers.
|
396 |
+
The desk was firm on the shaky floor.
|
397 |
+
It takes heat to bring out the odor.
|
398 |
+
Beef is scarcer than some lamb.
|
399 |
+
Raise the sail and steer the ship northward.
|
400 |
+
A cone costs five cents on Mondays.
|
401 |
+
A pod is what peas always grow in.
|
402 |
+
Jerk the dart from the cork target.
|
403 |
+
No cement will hold hard wood.
|
404 |
+
We now have a new base for shipping.
|
405 |
+
A list of names is carved around the base.
|
406 |
+
The sheep were led home by a dog.
|
407 |
+
Three for a dime, the young peddler cried.
|
408 |
+
The sense of smell is better than that of touch.
|
409 |
+
No hardship seemed to keep him sad.
|
410 |
+
Grace makes up for lack of beauty.
|
411 |
+
Nudge gently but wake her now.
|
412 |
+
The news struck doubt into restless minds.
|
413 |
+
Once we stood beside the shore.
|
414 |
+
A chink in the wall allowed a draft to blow.
|
415 |
+
Fasten two pins on each side.
|
416 |
+
A cold dip restores health and zest.
|
417 |
+
He takes the oath of office each March.
|
418 |
+
The sand drifts over the sill of the old house.
|
419 |
+
The point of the steel pen was bent and twisted.
|
420 |
+
There is a lag between thought and act.
|
421 |
+
Seed is needed to plant the spring corn.
|
422 |
+
Draw the chart with heavy black lines.
|
423 |
+
The boy owed his pal thirty cents.
|
424 |
+
The chap slipped into the crowd and was lost.
|
425 |
+
Hats are worn to tea and not to dinner.
|
426 |
+
The ramp led up to the wide highway.
|
427 |
+
Beat the dust from the rug onto the lawn.
|
428 |
+
Say it slowly but make it ring clear.
|
429 |
+
The straw nest housed five robins.
|
430 |
+
Screen the porch with woven straw mats.
|
431 |
+
This horse will nose his way to the finish.
|
432 |
+
The dry wax protects the deep scratch.
|
433 |
+
He picked up the dice for a second roll.
|
434 |
+
These coins will be needed to pay his debt.
|
435 |
+
The nag pulled the frail cart along.
|
436 |
+
Twist the valve and release hot steam.
|
437 |
+
The vamp of the shoe had a gold buckle.
|
438 |
+
The smell of burned rags itches my nose.
|
439 |
+
New pants lack cuffs and pockets.
|
440 |
+
The marsh will freeze when cold enough.
|
441 |
+
They slice the sausage thin with a knife.
|
442 |
+
The bloom of the rose lasts a few days.
|
443 |
+
A gray mare walked before the colt.
|
444 |
+
Breakfast buns are fine with a hot drink.
|
445 |
+
Bottles hold four kinds of rum.
|
446 |
+
The man wore a feather in his felt hat.
|
447 |
+
He wheeled the bike past the winding road.
|
448 |
+
Drop the ashes on the worn old rug.
|
449 |
+
The desk and both chairs were painted tan.
|
450 |
+
Throw out the used paper cup and plate.
|
451 |
+
A clean neck means a neat collar.
|
452 |
+
The couch cover and hall drapes were blue.
|
453 |
+
The stems of the tall glasses cracked and broke.
|
454 |
+
The wall phone rang loud and often.
|
455 |
+
The clothes dried on a thin wooden rack.
|
456 |
+
Turn on the lantern which gives us light.
|
457 |
+
The cleat sank deeply into the soft turf.
|
458 |
+
The bills were mailed promptly on the tenth of the month.
|
459 |
+
To have is better than to wait and hope.
|
460 |
+
The price is fair for a good antique clock.
|
461 |
+
The music played on while they talked.
|
462 |
+
Dispense with a vest on a day like this.
|
463 |
+
The bunch of grapes was pressed into wine.
|
464 |
+
He sent the figs, but kept the ripe cherries.
|
465 |
+
The hinge on the door creaked with old age.
|
466 |
+
The screen before the fire kept in the sparks.
|
467 |
+
Fly by night, and you waste little time.
|
468 |
+
Thick glasses helped him read the print.
|
469 |
+
Birth and death mark the limits of life.
|
470 |
+
The chair looked strong but had no bottom.
|
471 |
+
The kite flew wildly in the high wind.
|
472 |
+
A fur muff is stylish once more.
|
473 |
+
The tin box held priceless stones.
|
474 |
+
We need an end of all such matter.
|
475 |
+
The case was puzzling to the old and wise.
|
476 |
+
The bright lanterns were gay on the dark lawn.
|
477 |
+
We don't get much money but we have fun.
|
478 |
+
The youth drove with zest, but little skill.
|
479 |
+
Five years he lived with a shaggy dog.
|
480 |
+
A fence cuts through the corner lot.
|
481 |
+
The way to save money is not to spend much.
|
482 |
+
Shut the hatch before the waves push it in.
|
483 |
+
The odor of spring makes young hearts jump.
|
484 |
+
Crack the walnut with your sharp side teeth.
|
485 |
+
He offered proof in the form of a large chart.
|
486 |
+
Send the stuff in a thick paper bag.
|
487 |
+
A quart of milk is water for the most part.
|
488 |
+
They told wild tales to frighten him.
|
489 |
+
The three story house was built of stone.
|
490 |
+
In the rear of the ground floor was a large passage.
|
491 |
+
A man in a blue sweater sat at the desk.
|
492 |
+
Oats are a food eaten by horse and man.
|
493 |
+
Their eyelids droop for want of sleep.
|
494 |
+
A sip of tea revives his tired friend.
|
495 |
+
There are many ways to do these things.
|
496 |
+
Tuck the sheet under the edge of the mat.
|
497 |
+
A force equal to that would move the earth.
|
498 |
+
We like to see clear weather.
|
499 |
+
The work of the tailor is seen on each side.
|
500 |
+
Take a chance and win a china doll.
|
501 |
+
Shake the dust from your shoes, stranger.
|
502 |
+
She was kind to sick old people.
|
503 |
+
The square wooden crate was packed to be shipped.
|
504 |
+
The dusty bench stood by the stone wall.
|
505 |
+
We dress to suit the weather of most days.
|
506 |
+
Smile when you say nasty words.
|
507 |
+
A bowl of rice is free with chicken stew.
|
508 |
+
The water in this well is a source of good health.
|
509 |
+
Take shelter in this tent, but keep still.
|
510 |
+
That guy is the writer of a few banned books.
|
511 |
+
The little tales they tell are false.
|
512 |
+
The door was barred, locked, and bolted as well.
|
513 |
+
Ripe pears are fit for a queen's table.
|
514 |
+
A big wet stain was on the round carpet.
|
515 |
+
The kite dipped and swayed, but stayed aloft.
|
516 |
+
The pleasant hours fly by much too soon.
|
517 |
+
The room was crowded with a wild mob.
|
518 |
+
This strong arm shall shield your honor.
|
519 |
+
She blushed when he gave her a white orchid.
|
520 |
+
The beetle droned in the hot June sun.
|
521 |
+
Press the pedal with your left foot.
|
522 |
+
Neat plans fail without luck.
|
523 |
+
The black trunk fell from the landing.
|
524 |
+
The bank pressed for payment of the debt.
|
525 |
+
The theft of the pearl pin was kept secret.
|
526 |
+
Shake hands with this friendly child.
|
527 |
+
The vast space stretched into the far distance.
|
528 |
+
A rich farm is rare in this sandy waste.
|
529 |
+
His wide grin earned many friends.
|
530 |
+
Flax makes a fine brand of paper.
|
531 |
+
Hurdle the pit with the aid of a long pole.
|
532 |
+
A strong bid may scare your partner stiff.
|
533 |
+
Even a just cause needs power to win.
|
534 |
+
Peep under the tent and see the clowns.
|
535 |
+
The leaf drifts along with a slow spin.
|
536 |
+
Cheap clothes are flashy but don't last.
|
537 |
+
A thing of small note can cause despair.
|
538 |
+
Flood the mails with requests for this book.
|
539 |
+
A thick coat of black paint covered all.
|
540 |
+
The pencil was cut to be sharp at both ends.
|
541 |
+
Those last words were a strong statement.
|
542 |
+
He wrote his name boldly at the top of the sheet.
|
543 |
+
Dill pickles are sour but taste fine.
|
544 |
+
Down that road is the way to the grain farmer.
|
545 |
+
Either mud or dust are found at all times.
|
546 |
+
The best method is to fix it in place with clips.
|
547 |
+
If you mumble your speech will be lost.
|
548 |
+
At night the alarm roused him from a deep sleep.
|
549 |
+
Read just what the meter says.
|
550 |
+
Fill your pack with bright trinkets for the poor.
|
551 |
+
The small red neon lamp went out.
|
552 |
+
Clams are small, round, soft, and tasty.
|
553 |
+
The fan whirled its round blades softly.
|
554 |
+
The line where the edges join was clean.
|
555 |
+
Breathe deep and smell the piny air.
|
556 |
+
It matters not if he reads these words or those.
|
557 |
+
A brown leather bag hung from its strap.
|
558 |
+
A toad and a frog are hard to tell apart.
|
559 |
+
A white silk jacket goes with any shoes.
|
560 |
+
A break in the dam almost caused a flood.
|
561 |
+
Paint the sockets in the wall dull green.
|
562 |
+
The child crawled into the dense grass.
|
563 |
+
Bribes fail where honest men work.
|
564 |
+
Trample the spark, else the flames will spread.
|
565 |
+
The hilt of the sword was carved with fine designs.
|
566 |
+
A round hole was drilled through the thin board.
|
567 |
+
Footprints showed the path he took up the beach.
|
568 |
+
She was waiting at my front lawn.
|
569 |
+
A vent near the edge brought in fresh air.
|
570 |
+
Prod the old mule with a crooked stick.
|
571 |
+
It is a band of steel three inches wide.
|
572 |
+
The pipe ran almost the length of the ditch.
|
573 |
+
It was hidden from sight by a mass of leaves and shrubs.
|
574 |
+
The weight of the package was seen on the high scale.
|
575 |
+
Wake and rise, and step into the green outdoors.
|
576 |
+
The green light in the brown box flickered.
|
577 |
+
The brass tube circled the high wall.
|
578 |
+
The lobes of her ears were pierced to hold rings.
|
579 |
+
Hold the hammer near the end to drive the nail.
|
580 |
+
Next Sunday is the twelfth of the month.
|
581 |
+
Every word and phrase he speaks is true.
|
582 |
+
He put his last cartridge into the gun and fired.
|
583 |
+
They took their kids from the public school.
|
584 |
+
Drive the screw straight into the wood.
|
585 |
+
Keep the hatch tight and the watch constant.
|
586 |
+
Sever the twine with a quick snip of the knife.
|
587 |
+
Paper will dry out when wet.
|
588 |
+
Slide the catch back and open the desk.
|
589 |
+
Help the weak to preserve their strength.
|
590 |
+
A sullen smile gets few friends.
|
591 |
+
Stop whistling and watch the boys march.
|
592 |
+
Jerk the cord, and out tumbles the gold.
|
593 |
+
Slide the tray across the glass top.
|
594 |
+
The cloud moved in a stately way and was gone.
|
595 |
+
Light maple makes for a swell room.
|
596 |
+
Set the piece here and say nothing.
|
597 |
+
Dull stories make her laugh.
|
598 |
+
A stiff cord will do to fasten your shoe.
|
599 |
+
Get the trust fund to the bank early.
|
600 |
+
Choose between the high road and the low.
|
601 |
+
A plea for funds seems to come again.
|
602 |
+
He lent his coat to the tall gaunt stranger.
|
603 |
+
There is a strong chance it will happen once more.
|
604 |
+
The duke left the park in a silver coach.
|
605 |
+
Greet the new guests and leave quickly.
|
606 |
+
When the frost has come it is time for turkey.
|
607 |
+
Sweet words work better than fierce.
|
608 |
+
A thin stripe runs down the middle.
|
609 |
+
A six comes up more often than a ten.
|
610 |
+
Lush fern grow on the lofty rocks.
|
611 |
+
The ram scared the school children off.
|
612 |
+
The team with the best timing looks good.
|
613 |
+
The farmer swapped his horse for a brown ox.
|
614 |
+
Sit on the perch and tell the others what to do.
|
615 |
+
A steep trail is painful for our feet.
|
616 |
+
The early phase of life moves fast.
|
617 |
+
Green moss grows on the northern side.
|
618 |
+
Tea in thin china has a sweet taste.
|
619 |
+
Pitch the straw through the door of the stable.
|
620 |
+
The latch on the back gate needed a nail.
|
621 |
+
The goose was brought straight from the old market.
|
622 |
+
The sink is the thing in which we pile dishes.
|
623 |
+
A whiff of it will cure the most stubborn cold.
|
624 |
+
The facts don't always show who is right.
|
625 |
+
She flaps her cape as she parades the street.
|
626 |
+
The loss of the cruiser was a blow to the fleet.
|
627 |
+
Loop the braid to the left and then over.
|
628 |
+
Plead with the lawyer to drop the lost cause.
|
629 |
+
Calves thrive on tender spring grass.
|
630 |
+
Post no bills on this office wall.
|
631 |
+
Tear a thin sheet from the yellow pad.
|
632 |
+
A cruise in warm waters in a sleek yacht is fun.
|
633 |
+
A streak of color ran down the left edge.
|
634 |
+
It was done before the boy could see it.
|
635 |
+
Crouch before you jump or miss the mark.
|
636 |
+
Pack the kits and don't forget the salt.
|
637 |
+
The square peg will settle in the round hole.
|
638 |
+
Fine soap saves tender skin.
|
639 |
+
Poached eggs and tea must suffice.
|
640 |
+
Bad nerves are jangled by a door slam.
|
641 |
+
Ship maps are different from those for planes.
|
642 |
+
Dimes showered down from all sides.
|
643 |
+
They sang the same tunes at each party.
|
644 |
+
The sky in the west is tinged with orange red.
|
645 |
+
The pods of peas ferment in bare fields.
|
646 |
+
The horse balked and threw the tall rider.
|
647 |
+
The hitch between the horse and cart broke.
|
648 |
+
Pile the coal high in the shed corner.
|
649 |
+
A gold vase is both rare and costly.
|
650 |
+
The knife was hung inside its bright sheath.
|
651 |
+
The rarest spice comes from the far East.
|
652 |
+
The roof should be tilted at a sharp slant.
|
653 |
+
A smatter of French is worse than none.
|
654 |
+
The mule trod the treadmill day and night.
|
655 |
+
The aim of the contest is to raise a great fund.
|
656 |
+
To send it now in large amounts is bad.
|
657 |
+
There is a fine hard tang in salty air.
|
658 |
+
Cod is the main business of the north shore.
|
659 |
+
The slab was hewn from heavy blocks of slate.
|
660 |
+
Dunk the stale biscuits into strong drink.
|
661 |
+
Hang tinsel from both branches.
|
662 |
+
Cap the jar with a tight brass cover.
|
663 |
+
The poor boy missed the boat again.
|
664 |
+
Be sure to set the lamp firmly in the hole.
|
665 |
+
Pick a card and slip it under the pack.
|
666 |
+
A round mat will cover the dull spot.
|
667 |
+
The first part of the plan needs changing.
|
668 |
+
A good book informs of what we ought to know.
|
669 |
+
The mail comes in three batches per day.
|
670 |
+
You cannot brew tea in a cold pot.
|
671 |
+
Dots of light betrayed the black cat.
|
672 |
+
Put the chart on the mantel and tack it down.
|
673 |
+
The night shift men rate extra pay.
|
674 |
+
The red paper brightened the dim stage.
|
675 |
+
See the player scoot to third base.
|
676 |
+
Slide the bill between the two leaves.
|
677 |
+
Many hands help get the job done.
|
678 |
+
We don't like to admit our small faults.
|
679 |
+
No doubt about the way the wind blows.
|
680 |
+
Dig deep in the earth for pirate's gold.
|
681 |
+
The steady drip is worse than a drenching rain.
|
682 |
+
A flat pack takes less luggage space.
|
683 |
+
Green ice frosted the punch bowl.
|
684 |
+
A stuffed chair slipped from the moving van.
|
685 |
+
The stitch will serve but needs to be shortened.
|
686 |
+
A thin book fits in the side pocket.
|
687 |
+
The gloss on top made it unfit to read.
|
688 |
+
The hail pattered on the burnt brown grass.
|
689 |
+
Seven seals were stamped on great sheets.
|
690 |
+
Our troops are set to strike heavy blows.
|
691 |
+
The store was jammed before the sale could start.
|
692 |
+
It was a bad error on the part of the new judge.
|
693 |
+
One step more and the board will collapse.
|
694 |
+
Take the match and strike it against your shoe.
|
695 |
+
The pot boiled, but the contents failed to jell.
|
696 |
+
The baby puts his right foot in his mouth.
|
697 |
+
The bombs left most of the town in ruins.
|
698 |
+
Stop and stare at the hard working man.
|
699 |
+
The streets are narrow and full of sharp turns.
|
700 |
+
The pup jerked the leash as he saw a feline shape.
|
701 |
+
Open your book to the first page.
|
702 |
+
Fish evade the net and swim off.
|
703 |
+
Dip the pail once and let it settle.
|
704 |
+
Will you please answer that phone.
|
705 |
+
The big red apple fell to the ground.
|
706 |
+
The curtain rose and the show was on.
|
707 |
+
The young prince became heir to the throne.
|
708 |
+
He sent the boy on a short errand.
|
709 |
+
Leave now and you will arrive on time.
|
710 |
+
The corner store was robbed last night.
|
711 |
+
A gold ring will please most any girl.
|
712 |
+
The long journey home took a year.
|
713 |
+
She saw a cat in the neighbor's house.
|
714 |
+
A pink shell was found on the sandy beach.
|
715 |
+
Small children came to see him.
|
716 |
+
The grass and bushes were wet with dew.
|
717 |
+
The blind man counted his old coins.
|
718 |
+
A severe storm tore down the barn.
|
719 |
+
She called his name many times.
|
720 |
+
When you hear the bell, come quickly.
|
extensions/coqui_tts/languages.json
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"Arabic": "ar",
|
3 |
+
"Chinese": "zh-cn",
|
4 |
+
"Czech": "cs",
|
5 |
+
"Dutch": "nl",
|
6 |
+
"English": "en",
|
7 |
+
"French": "fr",
|
8 |
+
"German": "de",
|
9 |
+
"Hungarian": "hu",
|
10 |
+
"Italian": "it",
|
11 |
+
"Japanese": "ja",
|
12 |
+
"Korean": "ko",
|
13 |
+
"Polish": "pl",
|
14 |
+
"Portuguese": "pt",
|
15 |
+
"Russian": "ru",
|
16 |
+
"Spanish": "es",
|
17 |
+
"Turkish": "tr"
|
18 |
+
}
|
extensions/coqui_tts/requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
TTS==0.21.*
|
extensions/coqui_tts/script.py
ADDED
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import html
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import random
|
5 |
+
import time
|
6 |
+
from pathlib import Path
|
7 |
+
|
8 |
+
import gradio as gr
|
9 |
+
import torch
|
10 |
+
from TTS.api import TTS
|
11 |
+
from TTS.utils.synthesizer import Synthesizer
|
12 |
+
|
13 |
+
from modules import chat, shared, ui_chat
|
14 |
+
from modules.ui import create_refresh_button
|
15 |
+
from modules.utils import gradio
|
16 |
+
|
17 |
+
os.environ["COQUI_TOS_AGREED"] = "1"
|
18 |
+
|
19 |
+
params = {
|
20 |
+
"activate": True,
|
21 |
+
"autoplay": True,
|
22 |
+
"show_text": False,
|
23 |
+
"remove_trailing_dots": False,
|
24 |
+
"voice": "female_01.wav",
|
25 |
+
"language": "English",
|
26 |
+
"model_name": "tts_models/multilingual/multi-dataset/xtts_v2",
|
27 |
+
"device": "cuda" if torch.cuda.is_available() else "cpu"
|
28 |
+
}
|
29 |
+
|
30 |
+
this_dir = str(Path(__file__).parent.resolve())
|
31 |
+
model = None
|
32 |
+
with open(Path(f"{this_dir}/languages.json"), encoding='utf8') as f:
|
33 |
+
languages = json.load(f)
|
34 |
+
|
35 |
+
|
36 |
+
def get_available_voices():
|
37 |
+
return sorted([voice.name for voice in Path(f"{this_dir}/voices").glob("*.wav")])
|
38 |
+
|
39 |
+
|
40 |
+
def preprocess(raw_input):
|
41 |
+
raw_input = html.unescape(raw_input)
|
42 |
+
# raw_input = raw_input.strip("\"")
|
43 |
+
return raw_input
|
44 |
+
|
45 |
+
|
46 |
+
def new_split_into_sentences(self, text):
|
47 |
+
sentences = self.seg.segment(text)
|
48 |
+
if params['remove_trailing_dots']:
|
49 |
+
sentences_without_dots = []
|
50 |
+
for sentence in sentences:
|
51 |
+
if sentence.endswith('.') and not sentence.endswith('...'):
|
52 |
+
sentence = sentence[:-1]
|
53 |
+
|
54 |
+
sentences_without_dots.append(sentence)
|
55 |
+
|
56 |
+
return sentences_without_dots
|
57 |
+
else:
|
58 |
+
return sentences
|
59 |
+
|
60 |
+
|
61 |
+
Synthesizer.split_into_sentences = new_split_into_sentences
|
62 |
+
|
63 |
+
|
64 |
+
def load_model():
|
65 |
+
model = TTS(params["model_name"]).to(params["device"])
|
66 |
+
return model
|
67 |
+
|
68 |
+
|
69 |
+
def remove_tts_from_history(history):
|
70 |
+
for i, entry in enumerate(history['internal']):
|
71 |
+
history['visible'][i] = [history['visible'][i][0], entry[1]]
|
72 |
+
|
73 |
+
return history
|
74 |
+
|
75 |
+
|
76 |
+
def toggle_text_in_history(history):
|
77 |
+
for i, entry in enumerate(history['visible']):
|
78 |
+
visible_reply = entry[1]
|
79 |
+
if visible_reply.startswith('<audio'):
|
80 |
+
if params['show_text']:
|
81 |
+
reply = history['internal'][i][1]
|
82 |
+
history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
|
83 |
+
else:
|
84 |
+
history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
|
85 |
+
|
86 |
+
return history
|
87 |
+
|
88 |
+
|
89 |
+
def random_sentence():
|
90 |
+
with open(Path("extensions/coqui_tts/harvard_sentences.txt")) as f:
|
91 |
+
return random.choice(list(f))
|
92 |
+
|
93 |
+
|
94 |
+
def voice_preview(string):
|
95 |
+
string = html.unescape(string) or random_sentence()
|
96 |
+
|
97 |
+
output_file = Path('extensions/coqui_tts/outputs/voice_preview.wav')
|
98 |
+
model.tts_to_file(
|
99 |
+
text=string,
|
100 |
+
file_path=output_file,
|
101 |
+
speaker_wav=[f"{this_dir}/voices/{params['voice']}"],
|
102 |
+
language=languages[params["language"]]
|
103 |
+
)
|
104 |
+
|
105 |
+
return f'<audio src="file/{output_file.as_posix()}?{int(time.time())}" controls autoplay></audio>'
|
106 |
+
|
107 |
+
|
108 |
+
def history_modifier(history):
|
109 |
+
# Remove autoplay from the last reply
|
110 |
+
if len(history['internal']) > 0:
|
111 |
+
history['visible'][-1] = [
|
112 |
+
history['visible'][-1][0],
|
113 |
+
history['visible'][-1][1].replace('controls autoplay>', 'controls>')
|
114 |
+
]
|
115 |
+
|
116 |
+
return history
|
117 |
+
|
118 |
+
|
119 |
+
def state_modifier(state):
|
120 |
+
if not params['activate']:
|
121 |
+
return state
|
122 |
+
|
123 |
+
state['stream'] = False
|
124 |
+
return state
|
125 |
+
|
126 |
+
|
127 |
+
def input_modifier(string, state):
|
128 |
+
if not params['activate']:
|
129 |
+
return string
|
130 |
+
|
131 |
+
shared.processing_message = "*Is recording a voice message...*"
|
132 |
+
return string
|
133 |
+
|
134 |
+
|
135 |
+
def output_modifier(string, state):
|
136 |
+
if not params['activate']:
|
137 |
+
return string
|
138 |
+
|
139 |
+
original_string = string
|
140 |
+
string = preprocess(html.unescape(string))
|
141 |
+
if string == '':
|
142 |
+
string = '*Empty reply, try regenerating*'
|
143 |
+
else:
|
144 |
+
output_file = Path(f'extensions/coqui_tts/outputs/{state["character_menu"]}_{int(time.time())}.wav')
|
145 |
+
model.tts_to_file(
|
146 |
+
text=string,
|
147 |
+
file_path=output_file,
|
148 |
+
speaker_wav=[f"{this_dir}/voices/{params['voice']}"],
|
149 |
+
language=languages[params["language"]]
|
150 |
+
)
|
151 |
+
|
152 |
+
autoplay = 'autoplay' if params['autoplay'] else ''
|
153 |
+
string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>'
|
154 |
+
if params['show_text']:
|
155 |
+
string += f'\n\n{original_string}'
|
156 |
+
|
157 |
+
shared.processing_message = "*Is typing...*"
|
158 |
+
return string
|
159 |
+
|
160 |
+
|
161 |
+
def custom_css():
|
162 |
+
path_to_css = Path(f"{this_dir}/style.css")
|
163 |
+
return open(path_to_css, 'r').read()
|
164 |
+
|
165 |
+
|
166 |
+
def setup():
|
167 |
+
global model
|
168 |
+
print("[XTTS] Loading XTTS...")
|
169 |
+
model = load_model()
|
170 |
+
print("[XTTS] Done!")
|
171 |
+
Path(f"{this_dir}/outputs").mkdir(parents=True, exist_ok=True)
|
172 |
+
|
173 |
+
|
174 |
+
def ui():
|
175 |
+
with gr.Accordion("Coqui TTS (XTTSv2)"):
|
176 |
+
with gr.Row():
|
177 |
+
activate = gr.Checkbox(value=params['activate'], label='Activate TTS')
|
178 |
+
autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')
|
179 |
+
|
180 |
+
with gr.Row():
|
181 |
+
show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')
|
182 |
+
remove_trailing_dots = gr.Checkbox(value=params['remove_trailing_dots'], label='Remove trailing "." from text segments before converting to audio')
|
183 |
+
|
184 |
+
with gr.Row():
|
185 |
+
with gr.Row():
|
186 |
+
voice = gr.Dropdown(get_available_voices(), label="Voice wav", value=params["voice"])
|
187 |
+
create_refresh_button(voice, lambda: None, lambda: {'choices': get_available_voices(), 'value': params["voice"]}, 'refresh-button')
|
188 |
+
|
189 |
+
language = gr.Dropdown(languages.keys(), label="Language", value=params["language"])
|
190 |
+
|
191 |
+
with gr.Row():
|
192 |
+
preview_text = gr.Text(show_label=False, placeholder="Preview text", elem_id="silero_preview_text")
|
193 |
+
preview_play = gr.Button("Preview")
|
194 |
+
preview_audio = gr.HTML(visible=False)
|
195 |
+
|
196 |
+
with gr.Row():
|
197 |
+
convert = gr.Button('Permanently replace audios with the message texts')
|
198 |
+
convert_cancel = gr.Button('Cancel', visible=False)
|
199 |
+
convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
|
200 |
+
|
201 |
+
# Convert history with confirmation
|
202 |
+
convert_arr = [convert_confirm, convert, convert_cancel]
|
203 |
+
convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
|
204 |
+
convert_confirm.click(
|
205 |
+
lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
|
206 |
+
remove_tts_from_history, gradio('history'), gradio('history')).then(
|
207 |
+
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
208 |
+
chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
|
209 |
+
|
210 |
+
convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
|
211 |
+
|
212 |
+
# Toggle message text in history
|
213 |
+
show_text.change(
|
214 |
+
lambda x: params.update({"show_text": x}), show_text, None).then(
|
215 |
+
toggle_text_in_history, gradio('history'), gradio('history')).then(
|
216 |
+
chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then(
|
217 |
+
chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
|
218 |
+
|
219 |
+
# Event functions to update the parameters in the backend
|
220 |
+
activate.change(lambda x: params.update({"activate": x}), activate, None)
|
221 |
+
autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
|
222 |
+
remove_trailing_dots.change(lambda x: params.update({"remove_trailing_dots": x}), remove_trailing_dots, None)
|
223 |
+
voice.change(lambda x: params.update({"voice": x}), voice, None)
|
224 |
+
language.change(lambda x: params.update({"language": x}), language, None)
|
225 |
+
|
226 |
+
# Play preview
|
227 |
+
preview_text.submit(voice_preview, preview_text, preview_audio)
|
228 |
+
preview_play.click(voice_preview, preview_text, preview_audio)
|
extensions/coqui_tts/style.css
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.SDAP .hires_opts input[type="number"] {
|
2 |
+
width: 6em !important;
|
3 |
+
}
|
4 |
+
|
5 |
+
/* silero_tts preview */
|
6 |
+
.form:has(> #silero_preview_text) {
|
7 |
+
min-width: 75%
|
8 |
+
}
|
extensions/coqui_tts/voices/arnold.wav
ADDED
Binary file (897 kB). View file
|
|
extensions/coqui_tts/voices/female_01.wav
ADDED
Binary file (501 kB). View file
|
|
extensions/coqui_tts/voices/female_02.wav
ADDED
Binary file (334 kB). View file
|
|
extensions/example/script.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
An example of extension. It does nothing, but you can add transformations
|
3 |
+
before the return statements to customize the webui behavior.
|
4 |
+
|
5 |
+
Starting from history_modifier and ending in output_modifier, the
|
6 |
+
functions are declared in the same order that they are called at
|
7 |
+
generation time.
|
8 |
+
"""
|
9 |
+
|
10 |
+
import gradio as gr
|
11 |
+
import torch
|
12 |
+
from transformers import LogitsProcessor
|
13 |
+
|
14 |
+
from modules import chat, shared
|
15 |
+
from modules.text_generation import (
|
16 |
+
decode,
|
17 |
+
encode,
|
18 |
+
generate_reply,
|
19 |
+
)
|
20 |
+
|
21 |
+
params = {
|
22 |
+
"display_name": "Example Extension",
|
23 |
+
"is_tab": False,
|
24 |
+
}
|
25 |
+
|
26 |
+
class MyLogits(LogitsProcessor):
|
27 |
+
"""
|
28 |
+
Manipulates the probabilities for the next token before it gets sampled.
|
29 |
+
Used in the logits_processor_modifier function below.
|
30 |
+
"""
|
31 |
+
def __init__(self):
|
32 |
+
pass
|
33 |
+
|
34 |
+
def __call__(self, input_ids, scores):
|
35 |
+
# probs = torch.softmax(scores, dim=-1, dtype=torch.float)
|
36 |
+
# probs[0] /= probs[0].sum()
|
37 |
+
# scores = torch.log(probs / (1 - probs))
|
38 |
+
return scores
|
39 |
+
|
40 |
+
def history_modifier(history):
|
41 |
+
"""
|
42 |
+
Modifies the chat history.
|
43 |
+
Only used in chat mode.
|
44 |
+
"""
|
45 |
+
return history
|
46 |
+
|
47 |
+
def state_modifier(state):
|
48 |
+
"""
|
49 |
+
Modifies the state variable, which is a dictionary containing the input
|
50 |
+
values in the UI like sliders and checkboxes.
|
51 |
+
"""
|
52 |
+
return state
|
53 |
+
|
54 |
+
def chat_input_modifier(text, visible_text, state):
|
55 |
+
"""
|
56 |
+
Modifies the user input string in chat mode (visible_text).
|
57 |
+
You can also modify the internal representation of the user
|
58 |
+
input (text) to change how it will appear in the prompt.
|
59 |
+
"""
|
60 |
+
return text, visible_text
|
61 |
+
|
62 |
+
def input_modifier(string, state, is_chat=False):
|
63 |
+
"""
|
64 |
+
In default/notebook modes, modifies the whole prompt.
|
65 |
+
|
66 |
+
In chat mode, it is the same as chat_input_modifier but only applied
|
67 |
+
to "text", here called "string", and not to "visible_text".
|
68 |
+
"""
|
69 |
+
return string
|
70 |
+
|
71 |
+
def bot_prefix_modifier(string, state):
|
72 |
+
"""
|
73 |
+
Modifies the prefix for the next bot reply in chat mode.
|
74 |
+
By default, the prefix will be something like "Bot Name:".
|
75 |
+
"""
|
76 |
+
return string
|
77 |
+
|
78 |
+
def tokenizer_modifier(state, prompt, input_ids, input_embeds):
|
79 |
+
"""
|
80 |
+
Modifies the input ids and embeds.
|
81 |
+
Used by the multimodal extension to put image embeddings in the prompt.
|
82 |
+
Only used by loaders that use the transformers library for sampling.
|
83 |
+
"""
|
84 |
+
return prompt, input_ids, input_embeds
|
85 |
+
|
86 |
+
def logits_processor_modifier(processor_list, input_ids):
|
87 |
+
"""
|
88 |
+
Adds logits processors to the list, allowing you to access and modify
|
89 |
+
the next token probabilities.
|
90 |
+
Only used by loaders that use the transformers library for sampling.
|
91 |
+
"""
|
92 |
+
processor_list.append(MyLogits())
|
93 |
+
return processor_list
|
94 |
+
|
95 |
+
def output_modifier(string, state, is_chat=False):
|
96 |
+
"""
|
97 |
+
Modifies the LLM output before it gets presented.
|
98 |
+
|
99 |
+
In chat mode, the modified version goes into history['visible'],
|
100 |
+
and the original version goes into history['internal'].
|
101 |
+
"""
|
102 |
+
return string
|
103 |
+
|
104 |
+
def custom_generate_chat_prompt(user_input, state, **kwargs):
|
105 |
+
"""
|
106 |
+
Replaces the function that generates the prompt from the chat history.
|
107 |
+
Only used in chat mode.
|
108 |
+
"""
|
109 |
+
result = chat.generate_chat_prompt(user_input, state, **kwargs)
|
110 |
+
return result
|
111 |
+
|
112 |
+
def custom_css():
|
113 |
+
"""
|
114 |
+
Returns a CSS string that gets appended to the CSS for the webui.
|
115 |
+
"""
|
116 |
+
return ''
|
117 |
+
|
118 |
+
def custom_js():
|
119 |
+
"""
|
120 |
+
Returns a javascript string that gets appended to the javascript
|
121 |
+
for the webui.
|
122 |
+
"""
|
123 |
+
return ''
|
124 |
+
|
125 |
+
def setup():
|
126 |
+
"""
|
127 |
+
Gets executed only once, when the extension is imported.
|
128 |
+
"""
|
129 |
+
pass
|
130 |
+
|
131 |
+
def ui():
|
132 |
+
"""
|
133 |
+
Gets executed when the UI is drawn. Custom gradio elements and
|
134 |
+
their corresponding event handlers should be defined here.
|
135 |
+
|
136 |
+
To learn about gradio components, check out the docs:
|
137 |
+
https://gradio.app/docs/
|
138 |
+
"""
|
139 |
+
pass
|
extensions/gallery/__pycache__/script.cpython-311.pyc
ADDED
Binary file (6.89 kB). View file
|
|
extensions/gallery/script.js
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
let gallery_element = document.getElementById('gallery-extension');
|
2 |
+
let chat_mode_element = document.getElementById('chat-mode');
|
3 |
+
|
4 |
+
let extensions_block = document.getElementById('extensions');
|
5 |
+
let extensions_block_size = extensions_block.childNodes.length;
|
6 |
+
let gallery_only = (extensions_block_size == 5);
|
7 |
+
|
8 |
+
function gotoFirstPage() {
|
9 |
+
const firstPageButton = gallery_element.querySelector('.paginate > button');
|
10 |
+
if (firstPageButton) {
|
11 |
+
firstPageButton.click();
|
12 |
+
}
|
13 |
+
}
|
14 |
+
|
15 |
+
document.querySelector('.header_bar').addEventListener('click', function(event) {
|
16 |
+
if (event.target.tagName === 'BUTTON') {
|
17 |
+
const buttonText = event.target.textContent.trim();
|
18 |
+
|
19 |
+
let chat_visible = (buttonText == 'Chat');
|
20 |
+
let default_visible = (buttonText == 'Default');
|
21 |
+
let notebook_visible = (buttonText == 'Notebook');
|
22 |
+
let chat_mode_visible = (chat_mode_element.offsetHeight > 0 && chat_mode_element.offsetWidth > 0);
|
23 |
+
|
24 |
+
// Only show this extension in the Chat tab
|
25 |
+
if (chat_visible) {
|
26 |
+
if (chat_mode_visible) {
|
27 |
+
gallery_element.style.display = 'block';
|
28 |
+
extensions_block.style.display = '';
|
29 |
+
} else {
|
30 |
+
gallery_element.style.display = 'none';
|
31 |
+
extensions_block.style.display = 'none';
|
32 |
+
}
|
33 |
+
} else {
|
34 |
+
gallery_element.style.display = 'none';
|
35 |
+
if (gallery_only) {
|
36 |
+
extensions_block.style.display = 'none';
|
37 |
+
}
|
38 |
+
}
|
39 |
+
}
|
40 |
+
});
|
extensions/gallery/script.py
ADDED
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pathlib import Path
|
2 |
+
|
3 |
+
import gradio as gr
|
4 |
+
|
5 |
+
from modules.html_generator import get_image_cache
|
6 |
+
from modules.shared import gradio, settings
|
7 |
+
|
8 |
+
|
9 |
+
cards = []
|
10 |
+
|
11 |
+
|
12 |
+
def generate_css():
|
13 |
+
css = """
|
14 |
+
.highlighted-border {
|
15 |
+
border-color: rgb(249, 115, 22) !important;
|
16 |
+
}
|
17 |
+
|
18 |
+
.character-gallery > .gallery {
|
19 |
+
margin: 1rem 0;
|
20 |
+
display: grid !important;
|
21 |
+
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
|
22 |
+
grid-column-gap: 0.4rem;
|
23 |
+
grid-row-gap: 1.2rem;
|
24 |
+
}
|
25 |
+
|
26 |
+
.character-gallery > .label {
|
27 |
+
display: none !important;
|
28 |
+
}
|
29 |
+
|
30 |
+
.character-gallery button.gallery-item {
|
31 |
+
display: contents;
|
32 |
+
}
|
33 |
+
|
34 |
+
.character-container {
|
35 |
+
cursor: pointer;
|
36 |
+
text-align: center;
|
37 |
+
position: relative;
|
38 |
+
opacity: 0.85;
|
39 |
+
}
|
40 |
+
|
41 |
+
.character-container:hover {
|
42 |
+
opacity: 1;
|
43 |
+
}
|
44 |
+
|
45 |
+
.character-container .placeholder, .character-container img {
|
46 |
+
width: 150px;
|
47 |
+
height: 200px;
|
48 |
+
background-color: gray;
|
49 |
+
object-fit: cover;
|
50 |
+
margin: 0 auto;
|
51 |
+
border-radius: 1rem;
|
52 |
+
border: 3px solid white;
|
53 |
+
box-shadow: 3px 3px 6px 0px rgb(0 0 0 / 50%);
|
54 |
+
}
|
55 |
+
|
56 |
+
.character-name {
|
57 |
+
margin-top: 0.3rem;
|
58 |
+
display: block;
|
59 |
+
font-size: 1.2rem;
|
60 |
+
font-weight: 600;
|
61 |
+
overflow-wrap: anywhere;
|
62 |
+
}
|
63 |
+
"""
|
64 |
+
return css
|
65 |
+
|
66 |
+
|
67 |
+
def generate_html():
|
68 |
+
global cards
|
69 |
+
cards = []
|
70 |
+
# Iterate through files in image folder
|
71 |
+
for file in sorted(Path("characters").glob("*")):
|
72 |
+
if file.suffix in [".json", ".yml", ".yaml"]:
|
73 |
+
character = file.stem
|
74 |
+
container_html = '<div class="character-container">'
|
75 |
+
image_html = "<div class='placeholder'></div>"
|
76 |
+
|
77 |
+
for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
|
78 |
+
if path.exists():
|
79 |
+
image_html = f'<img src="file/{get_image_cache(path)}">'
|
80 |
+
break
|
81 |
+
|
82 |
+
container_html += f'{image_html} <span class="character-name">{character}</span>'
|
83 |
+
container_html += "</div>"
|
84 |
+
cards.append([container_html, character])
|
85 |
+
|
86 |
+
return cards
|
87 |
+
|
88 |
+
|
89 |
+
def filter_cards(filter_str=''):
|
90 |
+
if filter_str == '':
|
91 |
+
return cards
|
92 |
+
|
93 |
+
filter_upper = filter_str.upper()
|
94 |
+
return [k for k in cards if filter_upper in k[1].upper()]
|
95 |
+
|
96 |
+
|
97 |
+
def select_character(evt: gr.SelectData):
|
98 |
+
return (evt.value[1])
|
99 |
+
|
100 |
+
|
101 |
+
def custom_js():
|
102 |
+
path_to_js = Path(__file__).parent.resolve() / 'script.js'
|
103 |
+
return open(path_to_js, 'r').read()
|
104 |
+
|
105 |
+
|
106 |
+
def ui():
|
107 |
+
with gr.Accordion("Character gallery", open=settings["gallery-open"], elem_id='gallery-extension'):
|
108 |
+
gr.HTML(value="<style>" + generate_css() + "</style>")
|
109 |
+
with gr.Row():
|
110 |
+
filter_box = gr.Textbox(label='', placeholder='Filter', lines=1, max_lines=1, container=False, elem_id='gallery-filter-box')
|
111 |
+
gr.ClearButton(filter_box, value='Clear', elem_classes='refresh-button')
|
112 |
+
update = gr.Button("Refresh", elem_classes='refresh-button')
|
113 |
+
|
114 |
+
gallery = gr.Dataset(
|
115 |
+
components=[gr.HTML(visible=False)],
|
116 |
+
label="",
|
117 |
+
samples=generate_html(),
|
118 |
+
elem_classes=["character-gallery"],
|
119 |
+
samples_per_page=settings["gallery-items_per_page"]
|
120 |
+
)
|
121 |
+
|
122 |
+
filter_box.change(lambda: None, None, None, _js=f'() => {{{custom_js()}; gotoFirstPage()}}').success(
|
123 |
+
filter_cards, filter_box, gallery).then(
|
124 |
+
lambda x: gr.update(elem_classes='highlighted-border' if x != '' else ''), filter_box, filter_box, show_progress=False)
|
125 |
+
|
126 |
+
update.click(generate_html, [], None).success(
|
127 |
+
filter_cards, filter_box, gallery)
|
128 |
+
|
129 |
+
gallery.select(select_character, None, gradio['character_menu'])
|
extensions/google_translate/requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
deep-translator==1.9.2
|
extensions/google_translate/script.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import html
|
2 |
+
|
3 |
+
import gradio as gr
|
4 |
+
from deep_translator import GoogleTranslator
|
5 |
+
|
6 |
+
params = {
|
7 |
+
"activate": True,
|
8 |
+
"language string": "ja",
|
9 |
+
}
|
10 |
+
|
11 |
+
language_codes = {'Afrikaans': 'af', 'Albanian': 'sq', 'Amharic': 'am', 'Arabic': 'ar', 'Armenian': 'hy', 'Azerbaijani': 'az', 'Basque': 'eu', 'Belarusian': 'be', 'Bengali': 'bn', 'Bosnian': 'bs', 'Bulgarian': 'bg', 'Catalan': 'ca', 'Cebuano': 'ceb', 'Chinese (Simplified)': 'zh-CN', 'Chinese (Traditional)': 'zh-TW', 'Corsican': 'co', 'Croatian': 'hr', 'Czech': 'cs', 'Danish': 'da', 'Dutch': 'nl', 'English': 'en', 'Esperanto': 'eo', 'Estonian': 'et', 'Finnish': 'fi', 'French': 'fr', 'Frisian': 'fy', 'Galician': 'gl', 'Georgian': 'ka', 'German': 'de', 'Greek': 'el', 'Gujarati': 'gu', 'Haitian Creole': 'ht', 'Hausa': 'ha', 'Hawaiian': 'haw', 'Hebrew': 'iw', 'Hindi': 'hi', 'Hmong': 'hmn', 'Hungarian': 'hu', 'Icelandic': 'is', 'Igbo': 'ig', 'Indonesian': 'id', 'Irish': 'ga', 'Italian': 'it', 'Japanese': 'ja', 'Javanese': 'jw', 'Kannada': 'kn', 'Kazakh': 'kk', 'Khmer': 'km', 'Korean': 'ko', 'Kurdish': 'ku', 'Kyrgyz': 'ky', 'Lao': 'lo', 'Latin': 'la', 'Latvian': 'lv', 'Lithuanian': 'lt', 'Luxembourgish': 'lb', 'Macedonian': 'mk', 'Malagasy': 'mg', 'Malay': 'ms', 'Malayalam': 'ml', 'Maltese': 'mt', 'Maori': 'mi', 'Marathi': 'mr', 'Mongolian': 'mn', 'Myanmar (Burmese)': 'my', 'Nepali': 'ne', 'Norwegian': 'no', 'Nyanja (Chichewa)': 'ny', 'Pashto': 'ps', 'Persian': 'fa', 'Polish': 'pl', 'Portuguese (Portugal, Brazil)': 'pt', 'Punjabi': 'pa', 'Romanian': 'ro', 'Russian': 'ru', 'Samoan': 'sm', 'Scots Gaelic': 'gd', 'Serbian': 'sr', 'Sesotho': 'st', 'Shona': 'sn', 'Sindhi': 'sd', 'Sinhala (Sinhalese)': 'si', 'Slovak': 'sk', 'Slovenian': 'sl', 'Somali': 'so', 'Spanish': 'es', 'Sundanese': 'su', 'Swahili': 'sw', 'Swedish': 'sv', 'Tagalog (Filipino)': 'tl', 'Tajik': 'tg', 'Tamil': 'ta', 'Telugu': 'te', 'Thai': 'th', 'Turkish': 'tr', 'Ukrainian': 'uk', 'Urdu': 'ur', 'Uzbek': 'uz', 'Vietnamese': 'vi', 'Welsh': 'cy', 'Xhosa': 'xh', 'Yiddish': 'yi', 'Yoruba': 'yo', 'Zulu': 'zu'}
|
12 |
+
|
13 |
+
|
14 |
+
def input_modifier(string):
|
15 |
+
"""
|
16 |
+
This function is applied to your text inputs before
|
17 |
+
they are fed into the model.
|
18 |
+
"""
|
19 |
+
if not params['activate']:
|
20 |
+
return string
|
21 |
+
|
22 |
+
return GoogleTranslator(source=params['language string'], target='en').translate(string)
|
23 |
+
|
24 |
+
|
25 |
+
def output_modifier(string):
|
26 |
+
"""
|
27 |
+
This function is applied to the model outputs.
|
28 |
+
"""
|
29 |
+
if not params['activate']:
|
30 |
+
return string
|
31 |
+
|
32 |
+
translated_str = GoogleTranslator(source='en', target=params['language string']).translate(html.unescape(string))
|
33 |
+
return html.escape(translated_str)
|
34 |
+
|
35 |
+
|
36 |
+
def bot_prefix_modifier(string):
|
37 |
+
"""
|
38 |
+
This function is only applied in chat mode. It modifies
|
39 |
+
the prefix text for the Bot and can be used to bias its
|
40 |
+
behavior.
|
41 |
+
"""
|
42 |
+
|
43 |
+
return string
|
44 |
+
|
45 |
+
|
46 |
+
def ui():
    """Build the Gradio controls for this extension and keep params in sync."""
    # Invert the mapping once to find the language name for the saved code,
    # instead of the O(n) list(keys)/list(values) index dance.
    code_to_name = {code: name for name, code in language_codes.items()}
    language_name = code_to_name[params['language string']]

    # Gradio elements
    with gr.Row():
        activate = gr.Checkbox(value=params['activate'], label='Activate translation')

    with gr.Row():
        language = gr.Dropdown(value=language_name, choices=list(language_codes), label='Language')

    # Event functions to update the parameters in the backend
    activate.change(lambda x: params.update({"activate": x}), activate, None)
    language.change(lambda x: params.update({"language string": language_codes[x]}), language, None)
|
extensions/long_replies/script.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from modules import chat, shared
|
3 |
+
from modules.text_generation import (
|
4 |
+
decode,
|
5 |
+
encode,
|
6 |
+
generate_reply,
|
7 |
+
)
|
8 |
+
from transformers import LogitsProcessor
|
9 |
+
import gradio as gr
|
10 |
+
|
11 |
+
# Extension settings; "min_length" is the minimum reply length (in tokens)
# enforced by MyLogits below. Editable from the slider created in ui().
params = {
    "display_name": "Long replies",
    "is_tab": False,
    "min_length": 120,
}

# Number of tokens in the prompt at the start of generation; set by
# tokenizer_modifier() and read by MyLogits to measure reply length so far.
initial_size = 0
|
18 |
+
|
19 |
+
class MyLogits(LogitsProcessor):
    """
    Manipulates the probabilities for the next token before it gets sampled.
    Used in the logits_processor_modifier function below.
    """

    # Large negative bias that effectively forbids a token from being sampled.
    SUPPRESS_SCORE = -1000

    def __init__(self):
        # Token id for "\n"; [-1] skips any BOS token the tokenizer prepends.
        # Suppressing it keeps the model from terminating its reply early.
        self.newline_id = shared.tokenizer.encode('\n')[-1]

    def __call__(self, input_ids, scores):
        # While the reply is shorter than the configured minimum, make a
        # newline (which would end the reply) practically impossible.
        if input_ids.shape[-1] - initial_size < params["min_length"]:
            scores[..., self.newline_id] = self.SUPPRESS_SCORE

        return scores
|
37 |
+
|
38 |
+
def history_modifier(history):
    """
    Chat-mode hook for rewriting the conversation history.
    This extension leaves the history untouched.
    """
    return history
|
44 |
+
|
45 |
+
def state_modifier(state):
    """
    Hook for adjusting the UI state dictionary (sliders, checkboxes, ...).
    No adjustments are needed for this extension.
    """
    return state
|
51 |
+
|
52 |
+
def chat_input_modifier(text, visible_text, state):
    """
    Chat-mode hook for rewriting the user's message. `visible_text` is what
    appears in the UI; `text` is what goes into the prompt. Both are
    returned unchanged here.
    """
    return text, visible_text
|
59 |
+
|
60 |
+
def input_modifier(string, state):
    """
    In default/notebook modes, rewrites the whole prompt; in chat mode it
    applies only to the internal text (not the visible text).
    Pass-through for this extension.
    """
    return string
|
68 |
+
|
69 |
+
def bot_prefix_modifier(string, state):
    """
    Rewrites the prefix for the next bot reply in chat mode (by default
    something like "Bot Name:"). Returned unchanged here.
    """
    return string
|
75 |
+
|
76 |
+
def tokenizer_modifier(state, prompt, input_ids, input_embeds):
    """
    Hook over the tokenized prompt (transformers loaders only).

    Records the prompt length in the module-level `initial_size` so MyLogits
    can later tell how many reply tokens have been generated.
    """
    global initial_size

    initial_size = input_ids.shape[-1]
    return prompt, input_ids, input_embeds
|
87 |
+
|
88 |
+
def logits_processor_modifier(processor_list, input_ids):
    """
    Registers MyLogits so the next-token scores can be adjusted during
    sampling (transformers loaders only).
    """
    processor_list.append(MyLogits())
    return processor_list
|
96 |
+
|
97 |
+
def output_modifier(string, state):
    """
    Rewrites the LLM output before it is displayed. In chat mode the
    modified version goes into history['visible'] and the original into
    history['internal']. Pass-through here.
    """
    return string
|
105 |
+
|
106 |
+
def custom_generate_chat_prompt(user_input, state, **kwargs):
    """
    Chat-mode hook replacing prompt generation from the history; this
    extension simply delegates to the default implementation.
    """
    return chat.generate_chat_prompt(user_input, state, **kwargs)
|
113 |
+
|
114 |
+
def custom_css():
    """Return extra CSS to append to the webui (none for this extension)."""
    return ''
|
119 |
+
|
120 |
+
def custom_js():
    """Return extra JavaScript to append to the webui (none here)."""
    return ''
|
126 |
+
|
127 |
+
def setup():
    """One-time initialization when the extension is imported; nothing to do."""
|
132 |
+
|
133 |
+
def ui():
    """
    Draw the extension's Gradio controls: a single slider for the minimum
    reply length, kept in sync with params['min_length'].

    Gradio component docs: https://gradio.app/docs/
    """
    min_length = gr.Slider(0, 800, step=10, value=params['min_length'], label='Minimum reply length')
    min_length.change(lambda x: params.update({'min_length': x}), min_length, None)
|
extensions/multimodal/DOCS.md
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Technical description of multimodal extension
|
2 |
+
|
3 |
+
## Working principle
|
4 |
+
Multimodality extension does most of the stuff which is required for any image input:
|
5 |
+
|
6 |
+
- adds the UI
|
7 |
+
- saves the images as base64 JPEGs to history
|
8 |
+
- provides the hooks to the UI
|
9 |
+
- if there are images in the prompt, it:
|
10 |
+
- splits the prompt to text and image parts
|
11 |
+
- adds image start/end markers to text parts, then encodes and embeds the text parts
|
12 |
+
- calls the vision pipeline to embed the images
|
13 |
+
- stitches the embeddings together, and returns them to text generation
|
14 |
+
- loads the appropriate vision pipeline, selected either from model name, or by specifying --multimodal-pipeline parameter
|
15 |
+
|
16 |
+
Now, for the pipelines, they:
|
17 |
+
|
18 |
+
- load the required vision models
|
19 |
+
- return some consts, for example the number of tokens taken up by image
|
20 |
+
- and most importantly: return the embeddings for LLM, given a list of images
|
21 |
+
|
22 |
+
## Prompts/history
|
23 |
+
|
24 |
+
To save images in prompt/history, this extension is using a base64 JPEG, wrapped in a HTML tag, like so:
|
25 |
+
```
|
26 |
+
<img src="data:image/jpeg;base64,{img_str}">
|
27 |
+
```
|
28 |
+
where `{img_str}` is the actual image data. This format makes displaying the images in the UI free. Note that the format must match exactly; the regex used to find the images is: `<img src="data:image/jpeg;base64,([A-Za-z0-9+/=]+)">`.
|
29 |
+
|
30 |
+
## LLM input
|
31 |
+
To describe the input, let's see it on an example prompt:
|
32 |
+
```
|
33 |
+
text1<image1>text2<image2>text3
|
34 |
+
```
|
35 |
+
where `textN` is N-th text, `<imageN>` is N-th image, in HTML format specified above.
|
36 |
+
|
37 |
+
**The first step is to split the prompt into image/text parts**, so we get:
|
38 |
+
```
|
39 |
+
['text1', '<image1>', 'text2', '<image2>', 'text3']
|
40 |
+
```
|
41 |
+
this is done in `MultimodalEmbedder._split_prompt(...)` function, which returns a list of `PromptPart`s - dataclasses wrapping the separate parts.
|
42 |
+
|
43 |
+
This function also appends the image start/end markers to text, which are provided by `AbstractMultimodalPipeline.image_start()` / `AbstractMultimodalPipeline.image_end()` functions. If image start is `<Img>`, and end is `</Img>`, this function will return:
|
44 |
+
```
|
45 |
+
['text1<Img>', '<image1>', '</Img>text2<Img>', '<image2>', '</Img>text3']
|
46 |
+
```
|
47 |
+
|
48 |
+
**The returned prompt parts are then turned into token embeddings.**
|
49 |
+
|
50 |
+
First, they are modified to token IDs, for the text it is done using standard `modules.text_generation.encode()` function, and for the images the returned token IDs are changed to placeholders. The placeholder is a list of `N` times `placeholder token id`, where `N` is specified using `AbstractMultimodalPipeline.num_image_embeds()`, and placeholder token IDs using `AbstractMultimodalPipeline.placeholder_token_id()`.
|
51 |
+
|
52 |
+
Now, based on the token IDs, the prompt might get truncated, especially if `max_new_tokens` are unreasonably high. Unfortunately, it can't be done simply, just by trimming the prompt to be short enough. This way will lead to sometimes splitting the prompt in the middle of an image embedding, which usually breaks the generation. Therefore, in this case, the entire image needs to be removed from input. This is done inside `MultimodalEmbedder._encode_text(...)` function.
|
53 |
+
|
54 |
+
**After the tokenization, the tokens need to get embedded**, the text and images are once again treated separately.
|
55 |
+
|
56 |
+
The text parts are turned to embeddings, using `AbstractMultimodalPipeline.embed_tokens(...)` function. It uses standard embedding function from the model, but to support many LLMs, the actual function is returned by the pipeline (as it might be different for different LLMs), for LLaMA it is `shared.model.model.embed_tokens(...)`.
|
57 |
+
|
58 |
+
The image parts are turned to embeddings, using `AbstractMultimodalPipeline.embed_images(...)` function. This function is specific for a given pipeline, it takes the images as input, forwards them through vision model/projector, and returns the embeddings.
|
59 |
+
|
60 |
+
**Now, the returned embeddings are stitched together**, using `torch.cat()`, this is creating the final input to the LLM.
|
61 |
+
|
62 |
+
## Pipelines
|
63 |
+
|
64 |
+
All of the pipelines should subclass `AbstractMultimodalPipeline` class. The idea is to allow for new pipelines to be added in the same way as user extensions - git clone into `extensions/multimodal/pipelines`.
|
65 |
+
|
66 |
+
The pipelines describe the vision part, containing the vision model/multimodal projector. Each pipeline should have a unique `name()`, which the user then selects via the `--multimodal-pipeline` CLI argument. For an example, see `pipelines/llava/llava.py`.
|
67 |
+
|
68 |
+
## Pipeline modules
|
69 |
+
|
70 |
+
Pipelines are organized into "pipeline modules" - subdirectories in `pipelines` directory. The pipeline modules should contain a file called `pipelines.py`, that should contain the following fields:
|
71 |
+
- `available_pipelines: List[str]` - list of pipelines provided by this module, shown as the list of available pipelines to the user
|
72 |
+
- `def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]`: - a function to get a concrete pipeline by `name`, if `name` doesn't match any, should return `None`. `params` is the user settings for multimodal extension
|
73 |
+
- `def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]`: - a function to get a pipeline from `model_name`, should be eager to return `None`, unless the determination can be done clearly (for example: MiniGPT-4 is based on Vicuna - it should never return the pipeline, but LLaVA can, as it has its own specific LLM finetune)
|
74 |
+
|
75 |
+
**NOTE**: A pipeline module should lazy-import the pipelines only when necessary, and it should keep its imports to minimum
|
76 |
+
|
77 |
+
## Pipeline params
|
78 |
+
|
79 |
+
The pipelines will get the extension `params` in the constructor. They should honor the following fields:
|
80 |
+
- `vision_device` - string, specifying `torch.device` to run the vision model (CLIP/ViT) on
|
81 |
+
- `vision_bits` - int, number of fp bits to load the vision model(s) in
|
82 |
+
- `projector_device` - string, specifying `torch.device` to run the projector models (Linear layers, QFormer, etc.) on
|
83 |
+
- `projector_bits` - int, number of fp bits to load the projector models in
|
84 |
+
|
85 |
+
As a helper, `AbstractMultimodalPipeline` has `_get_device(self, setting_name: str, params: dict)` and `_get_dtype(self, setting_name: str, params: dict)` helper functions, which parse string/int and return `torch.device` / `torch.dtype`.
|
extensions/multimodal/README.md
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Multimodal
|
2 |
+
|
3 |
+
## Description
|
4 |
+
|
5 |
+
Adds support for multimodality (text+images) to text-generation-webui.
|
6 |
+
|
7 |
+
Note: multimodal currently only works for transformers, AutoGPTQ, and GPTQ-for-LLaMa loaders. ExLlama (v1 and v2) and llama.cpp support are planned.
|
8 |
+
|
9 |
+
https://user-images.githubusercontent.com/3718215/233817203-69b57e77-0c55-4fd6-b742-3204bb13b8fc.mp4
|
10 |
+
|
11 |
+
## Usage
|
12 |
+
|
13 |
+
To run this extension, download a LLM that supports multimodality, and then start server.py with the appropriate `--multimodal-pipeline` argument. Examples:
|
14 |
+
|
15 |
+
```
|
16 |
+
# LLaVA 1.5 13B has the best performance
|
17 |
+
python server.py --model liuhaotian_llava-v1.5-13b --multimodal-pipeline llava-v1.5-13b --load-in-4bit
|
18 |
+
# LLaVA 1.5 7B is relatively weaker, but requires less memory
|
19 |
+
python server.py --model liuhaotian_llava-v1.5-7b --multimodal-pipeline llava-v1.5-7b --load-in-4bit
|
20 |
+
python server.py --model TheBloke_llava-v1.5-13B-GPTQ_gptq-4bit-32g-actorder_True --multimodal-pipeline llava-v1.5-13b --disable_exllama --loader autogptq
|
21 |
+
python server.py --model wojtab_llava-7b-v0-4bit-128g --multimodal-pipeline llava-7b
|
22 |
+
python server.py --model wojtab_llava-13b-v0-4bit-128g --multimodal-pipeline llava-13b
|
23 |
+
python server.py --model anon8231489123_vicuna-13b-GPTQ-4bit-128g --multimodal-pipeline minigpt4-13b
|
24 |
+
python server.py --model llama-7b-4bit --multimodal-pipeline minigpt4-7b
|
25 |
+
```
|
26 |
+
|
27 |
+
There is built-in support for LLaVA-v0-13B, LLaVA-v0-7b, and LLaVA-v1.5-13B. To install `minigpt4`:
|
28 |
+
|
29 |
+
- clone https://github.com/Wojtab/minigpt-4-pipeline into `extensions/multimodal/pipelines`
|
30 |
+
- install the requirements.txt
|
31 |
+
|
32 |
+
The same procedure should be used to install other pipelines, which can then be used with `--multimodal-pipeline [pipeline name]`. For additional multimodal pipelines refer to the compatibility section below.
|
33 |
+
|
34 |
+
Do note, that each image takes up a considerable amount of tokens, so adjust `max_new_tokens` to be at most 1700 (recommended value is between 200 to 500), so the images don't get truncated.
|
35 |
+
|
36 |
+
To send an image, just upload it to the extension field below chat, and send a prompt as always. The image will be added to the end of your message. If you wish to modify the placement, include a string `<image>` in your prompt.
|
37 |
+
|
38 |
+
Additionally, there is *Embed all images, not only the last one* checkbox. It modifies the image embeddings, by default (if it's unchecked), all but the most recent images have their embeddings empty, so they are not fed to the network. It seems as if some multimodal networks consider the features in all images at the same time as if they were a single image. Due to this behavior, by default, the extension skips previous images. However, it can lead to sub-par generation on other pipelines. If you want to include all images, just tick this checkbox.
|
39 |
+
|
40 |
+
## Compatibility
|
41 |
+
|
42 |
+
As of now, the following multimodal pipelines are supported:
|
43 |
+
|Pipeline|`--multimodal-pipeline`|Default LLM|LLM info(for the linked model)|Pipeline repository|
|
44 |
+
|-|-|-|-|-|
|
45 |
+
|[LLaVA 13B](https://github.com/haotian-liu/LLaVA)|`llava-13b`|[LLaVA 13B](https://huggingface.co/wojtab/llava-13b-v0-4bit-128g)|GPTQ 4-bit quant, old CUDA|built-in|
|
46 |
+
|[LLaVA 7B](https://github.com/haotian-liu/LLaVA)|`llava-7b`|[LLaVA 7B](https://huggingface.co/wojtab/llava-7b-v0-4bit-128g)|GPTQ 4-bit quant, old CUDA|built-in|
|
47 |
+
|[MiniGPT-4 7B](https://github.com/Vision-CAIR/MiniGPT-4)|`minigpt4-7b`|[Vicuna v0 7B](https://huggingface.co/TheBloke/vicuna-7B-GPTQ-4bit-128g)|GPTQ 4-bit quant, new format|[Wojtab/minigpt-4-pipeline](https://github.com/Wojtab/minigpt-4-pipeline)|
|
48 |
+
|[MiniGPT-4 13B](https://github.com/Vision-CAIR/MiniGPT-4)|`minigpt4-13b`|[Vicuna v0 13B](https://huggingface.co/anon8231489123/vicuna-13b-GPTQ-4bit-128g)|GPTQ 4-bit quant, old CUDA|[Wojtab/minigpt-4-pipeline](https://github.com/Wojtab/minigpt-4-pipeline)|
|
49 |
+
|[InstructBLIP 7B](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip)|`instructblip-7b`|[Vicuna v1.1 7B](https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g)|GPTQ 4-bit quant|[kjerk/instructblip-pipeline](https://github.com/kjerk/instructblip-pipeline)|
|
50 |
+
|[InstructBLIP 13B](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip)|`instructblip-13b`|[Vicuna v1.1 13B](https://huggingface.co/TheBloke/vicuna-13B-1.1-GPTQ-4bit-128g)|GPTQ 4-bit quant|[kjerk/instructblip-pipeline](https://github.com/kjerk/instructblip-pipeline)|
|
51 |
+
|
52 |
+
Some pipelines could support different LLMs but do note that while it might work, it isn't a supported configuration.
|
53 |
+
|
54 |
+
DO NOT report bugs if you are using a different LLM.
|
55 |
+
|
56 |
+
DO NOT report bugs with pipelines in this repository (unless they are built-in)
|
57 |
+
|
58 |
+
## Extension config
|
59 |
+
This extension uses the following parameters (from `settings.json`):
|
60 |
+
|Parameter|Description|
|
61 |
+
|---------|-----------|
|
62 |
+
|`multimodal-vision_bits`|Number of bits to load vision models (CLIP/ViT) feature extractor in (most pipelines should support either 32 or 16, default=32)|
|
63 |
+
|`multimodal-vision_device`|Torch device to run the feature extractor on, for example, `cpu` or `cuda:0`, by default `cuda:0` if available|
|
64 |
+
|`multimodal-projector_bits`|Number of bits to load feature projector model(s) in (most pipelines should support either 32 or 16, default=32)|
|
65 |
+
|`multimodal-projector_device`|Torch device to run the feature projector model(s) on, for example `cpu` or `cuda:0`, by default `cuda:0` if available|
|
66 |
+
|`multimodal-add_all_images_to_prompt`|Default value of "Embed all images, not only the last one" checkbox|
|
67 |
+
|
68 |
+
## Usage through API
|
69 |
+
|
70 |
+
### Chat completions endpoint
|
71 |
+
|
72 |
+
#### With an image URL
|
73 |
+
|
74 |
+
```shell
|
75 |
+
curl http://127.0.0.1:5000/v1/chat/completions \
|
76 |
+
-H "Content-Type: application/json" \
|
77 |
+
-d '{
|
78 |
+
"messages": [
|
79 |
+
{
|
80 |
+
"role": "user",
|
81 |
+
"image_url": "https://avatars.githubusercontent.com/u/112222186?v=4"
|
82 |
+
},
|
83 |
+
{
|
84 |
+
"role": "user",
|
85 |
+
"content": "What is unusual about this image?"
|
86 |
+
}
|
87 |
+
]
|
88 |
+
}'
|
89 |
+
```
|
90 |
+
|
91 |
+
#### With a Base64 image
|
92 |
+
|
93 |
+
```python
|
94 |
+
import base64
|
95 |
+
import json
|
96 |
+
import requests
|
97 |
+
|
98 |
+
img = open('image.jpg', 'rb')
|
99 |
+
img_bytes = img.read()
|
100 |
+
img_base64 = base64.b64encode(img_bytes).decode('utf-8')
|
101 |
+
data = { "messages": [
|
102 |
+
{
|
103 |
+
"role": "user",
|
104 |
+
"image_url": f"data:image/jpeg;base64,{img_base64}"
|
105 |
+
},
|
106 |
+
{
|
107 |
+
"role": "user",
|
108 |
+
"content": "what is unusual about this image?"
|
109 |
+
}
|
110 |
+
]
|
111 |
+
}
|
112 |
+
response = requests.post('http://127.0.0.1:5000/v1/chat/completions', json=data)
|
113 |
+
print(response.text)
|
114 |
+
```
|
115 |
+
|
116 |
+
You can run the multimodal inference through API, by inputting the images to prompt. Images are embedded like so: `f'<img src="data:image/jpeg;base64,{img_str}">'`, where `img_str` is base-64 jpeg data. Note that you will need to launch `server.py` with the arguments `--api --extensions multimodal`.
|
117 |
+
|
118 |
+
### Completions endpoint
|
119 |
+
|
120 |
+
Python example:
|
121 |
+
|
122 |
+
```Python
|
123 |
+
import base64
|
124 |
+
import requests
|
125 |
+
|
126 |
+
CONTEXT = "You are LLaVA, a large language and vision assistant trained by UW Madison WAIV Lab. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language. Follow the instructions carefully and explain your answers in detail.### Human: Hi!### Assistant: Hi there! How can I help you today?\n"
|
127 |
+
|
128 |
+
with open('extreme_ironing.jpg', 'rb') as f:
|
129 |
+
img_str = base64.b64encode(f.read()).decode('utf-8')
|
130 |
+
prompt = CONTEXT + f'### Human: What is unusual about this image: \n<img src="data:image/jpeg;base64,{img_str}">### Assistant: '
|
131 |
+
print(requests.post('http://127.0.0.1:5000/v1/completions', json={'prompt': prompt, 'max_tokens': 200, 'stop': ['\n###']}).json())
|
132 |
+
```
|
133 |
+
script output:
|
134 |
+
```Python
|
135 |
+
{'results': [{'text': "The unusual aspect of this image is that a man is standing on top of a yellow minivan while doing his laundry. He has set up a makeshift clothes line using the car's rooftop as an outdoor drying area. This scene is uncommon because people typically do their laundry indoors, in a dedicated space like a laundromat or a room in their home, rather than on top of a moving vehicle. Additionally, hanging clothes on the car could be potentially hazardous or illegal in some jurisdictions due to the risk of damaging the vehicle or causing accidents on the road.\n##"}]}
|
136 |
+
```
|
137 |
+
|
138 |
+
## For pipeline developers/technical description
|
139 |
+
see [DOCS.md](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/multimodal/DOCS.md)
|
extensions/multimodal/abstract_pipeline.py
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from abc import ABC, abstractmethod
|
2 |
+
from typing import List, Optional
|
3 |
+
|
4 |
+
import torch
|
5 |
+
from PIL import Image
|
6 |
+
from transformers import is_torch_xpu_available
|
7 |
+
|
8 |
+
|
9 |
+
class AbstractMultimodalPipeline(ABC):
    """Interface that every multimodal (vision) pipeline must implement.

    A pipeline wraps the vision model/projector and reports the constants
    (placeholder token id, number of image embeds, image start/end markers)
    used to splice image embeddings into the LLM prompt.
    """

    @staticmethod
    @abstractmethod
    def name() -> str:
        'name of the pipeline, should be same as in --multimodal-pipeline'
        pass

    @staticmethod
    @abstractmethod
    def image_start() -> Optional[str]:
        'return image start string, string representation of image start token, or None if not applicable'
        pass

    @staticmethod
    @abstractmethod
    def image_end() -> Optional[str]:
        'return image end string, string representation of image end token, or None if not applicable'
        pass

    @staticmethod
    @abstractmethod
    def placeholder_token_id() -> int:
        'return placeholder token id'
        pass

    @staticmethod
    @abstractmethod
    def num_image_embeds() -> int:
        'return the number of embeds used by a single image (for example: 256 for LLaVA)'
        pass

    @abstractmethod
    def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
        'forward the images through vision pipeline, and return their embeddings'
        pass

    @staticmethod
    @abstractmethod
    def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
        'embed tokens, the exact function varies by LLM, for LLaMA it is `shared.model.model.embed_tokens`'
        pass

    @staticmethod
    @abstractmethod
    def placeholder_embeddings() -> torch.Tensor:
        'get placeholder embeddings if there are multiple images, and `add_all_images_to_prompt` is False'
        pass

    def _get_device(self, setting_name: str, params: dict):
        # When the setting is unset, prefer CUDA, then XPU, then CPU.
        if params[setting_name] is None:
            return torch.device("cuda:0" if torch.cuda.is_available() else "xpu:0" if is_torch_xpu_available() else "cpu")
        return torch.device(params[setting_name])

    def _get_dtype(self, setting_name: str, params: dict):
        # 32 -> float32; any other value (typically 16) -> float16.
        return torch.float32 if int(params[setting_name]) == 32 else torch.float16
|
extensions/multimodal/multimodal_embedder.py
ADDED
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import re
|
3 |
+
from dataclasses import dataclass
|
4 |
+
from io import BytesIO
|
5 |
+
from typing import Any, List, Optional
|
6 |
+
|
7 |
+
import torch
|
8 |
+
from PIL import Image
|
9 |
+
|
10 |
+
from extensions.multimodal.pipeline_loader import load_pipeline
|
11 |
+
from modules import shared
|
12 |
+
from modules.logging_colors import logger
|
13 |
+
from modules.text_generation import encode, get_max_prompt_length
|
14 |
+
|
15 |
+
|
16 |
+
@dataclass
class PromptPart:
    # One text-or-image segment of a split multimodal prompt.
    text: str
    # Decoded PIL image; only set for image parts when load_images=True.
    image: Optional[Image.Image] = None
    # True when this part is an <img ...> tag rather than plain text.
    is_image: bool = False
    # Token ids for the part (placeholder ids for images); filled lazily.
    input_ids: Optional[torch.Tensor] = None
    # Final embedding tensor for the part; filled during embedding.
    embedding: Optional[torch.Tensor] = None
|
23 |
+
|
24 |
+
|
25 |
+
class MultimodalEmbedder:
|
26 |
+
    def __init__(self, params: dict):
        # Load the vision pipeline selected via user settings / CLI args;
        # `source` is the pipeline module directory it came from.
        pipeline, source = load_pipeline(params)
        self.pipeline = pipeline
        logger.info(f'Multimodal: loaded pipeline {self.pipeline.name()} from pipelines/{source} ({self.pipeline.__class__.__name__})')
|
30 |
+
|
31 |
+
    def _split_prompt(self, prompt: str, load_images: bool = False) -> List[PromptPart]:
        """Splits a prompt into a list of `PromptParts` to separate image data from text.
        It will also append `image_start` and `image_end` before and after the image, and optionally parse and load the images,
        if `load_images` is `True`.
        """
        parts: List[PromptPart] = []
        curr = 0
        while True:
            # Search only the not-yet-consumed tail of the prompt.
            match = re.search(r'<img src="data:image/jpeg;base64,([A-Za-z0-9+/=]+)">', prompt[curr:])
            if match is None:
                # no more image tokens, append the rest of the prompt
                if curr > 0:
                    # add image end token after last image
                    parts.append(PromptPart(text=self.pipeline.image_end() + prompt[curr:]))
                else:
                    parts.append(PromptPart(text=prompt))
                break
            # found an image, append image start token to the text
            # NOTE(review): image_end() is only prepended to the text after the
            # *final* image (branch above); text between two consecutive images
            # does not receive an image_end marker here — verify this matches
            # the pipelines' expectations.
            if match.start() > 0:
                parts.append(PromptPart(text=prompt[curr:curr + match.start()] + self.pipeline.image_start()))
            else:
                parts.append(PromptPart(text=self.pipeline.image_start()))
            # append the image
            parts.append(PromptPart(
                text=match.group(0),
                image=Image.open(BytesIO(base64.b64decode(match.group(1)))) if load_images else None,
                is_image=True
            ))
            # advance past the matched tag (match positions are relative to the tail)
            curr += match.end()
        return parts
|
61 |
+
|
62 |
+
def _len_in_tokens_prompt_parts(self, parts: List[PromptPart]) -> int:
|
63 |
+
"""Total length in tokens of all `parts`"""
|
64 |
+
tokens = 0
|
65 |
+
for part in parts:
|
66 |
+
if part.is_image:
|
67 |
+
tokens += self.pipeline.num_image_embeds()
|
68 |
+
elif part.input_ids is not None:
|
69 |
+
tokens += len(part.input_ids)
|
70 |
+
else:
|
71 |
+
tokens += len(encode(part.text)[0])
|
72 |
+
return tokens
|
73 |
+
|
74 |
+
def len_in_tokens(self, prompt: str) -> int:
|
75 |
+
"""Total length in tokens for a given text `prompt`"""
|
76 |
+
parts = self._split_prompt(prompt, False)
|
77 |
+
return self._len_in_tokens_prompt_parts(parts)
|
78 |
+
|
79 |
+
    def _encode_single_text(self, part: PromptPart, add_bos_token: bool) -> PromptPart:
        """Encode a single prompt `part` to `input_ids`. Returns a `PromptPart`"""
        if part.is_image:
            # Images become a run of identical placeholder token ids, one per
            # image embedding slot; they are swapped for real embeddings later.
            placeholders = torch.ones((self.pipeline.num_image_embeds())) * self.pipeline.placeholder_token_id()
            part.input_ids = placeholders.to(shared.model.device, dtype=torch.int64)
        else:
            # Only the first part of the prompt should get a BOS token.
            part.input_ids = encode(part.text, add_bos_token=add_bos_token)[0].to(shared.model.device, dtype=torch.int64)
        return part
|
87 |
+
|
88 |
+
@staticmethod
|
89 |
+
def _num_images(parts: List[PromptPart]) -> int:
|
90 |
+
count = 0
|
91 |
+
for part in parts:
|
92 |
+
if part.is_image:
|
93 |
+
count += 1
|
94 |
+
return count
|
95 |
+
|
96 |
+
def _encode_text(self, state, parts: List[PromptPart]) -> List[PromptPart]:
    """Encode text to token_ids, also truncate the prompt, if necessary.

    The chat/instruct mode should make prompts that fit in get_max_prompt_length, but if max_new_tokens are set
    such that the context + min_rows don't fit, we can get a prompt which is too long.
    We can't truncate image embeddings, as it leads to broken generation, so remove the images instead and warn the user
    """
    # BOS is only added to the very first part (i == 0), and only if enabled in `state`.
    encoded: List[PromptPart] = []
    for i, part in enumerate(parts):
        encoded.append(self._encode_single_text(part, i == 0 and state['add_bos_token']))

    # truncation:
    max_len = get_max_prompt_length(state)
    removed_images = 0

    # 1. remove entire text/image blocks
    # Drops leading parts while everything EXCEPT the first part is still too long,
    # so the first remaining part is the only candidate for partial truncation below.
    # NOTE(review): assumes `parts` is non-empty — TODO confirm callers never pass an empty prompt.
    while self._len_in_tokens_prompt_parts(encoded[1:]) > max_len:
        if encoded[0].is_image:
            removed_images += 1
        encoded = encoded[1:]

    # 2. check if the last prompt part doesn't need to get truncated
    if self._len_in_tokens_prompt_parts(encoded) > max_len:
        if encoded[0].is_image:
            # don't truncate image embeddings, just remove the image, otherwise generation will be broken
            removed_images += 1
            encoded = encoded[1:]
        elif len(encoded) > 1 and encoded[0].text.endswith(self.pipeline.image_start()):
            # see if we can keep image_start token
            len_image_start = len(encode(self.pipeline.image_start(), add_bos_token=state['add_bos_token'])[0])
            if self._len_in_tokens_prompt_parts(encoded[1:]) + len_image_start > max_len:
                # we can't -> remove this text, and the image
                encoded = encoded[2:]
                removed_images += 1
            else:
                # we can -> just truncate the text
                # Truncate from the front so the image_start suffix survives.
                trunc_len = self._len_in_tokens_prompt_parts(encoded) - max_len
                encoded[0].input_ids = encoded[0].input_ids[trunc_len:]
        elif len(encoded) > 0:
            # only one text left, truncate it normally
            trunc_len = self._len_in_tokens_prompt_parts(encoded) - max_len
            encoded[0].input_ids = encoded[0].input_ids[trunc_len:]

    # notify user if we truncated an image
    if removed_images > 0:
        logger.warning(f"Multimodal: removed {removed_images} image(s) from prompt. Try decreasing max_new_tokens if generation is broken")

    return encoded
|
144 |
+
|
145 |
+
def _embed(self, parts: List[PromptPart]) -> List[PromptPart]:
    """Attach an `embedding` to every part: images in one batch, text from token ids."""
    # Run every image through the vision pipeline in a single batch.
    image_positions = [idx for idx, p in enumerate(parts) if p.is_image]
    image_embeddings = self.pipeline.embed_images([parts[idx].image for idx in image_positions])
    for idx, emb in zip(image_positions, image_embeddings):
        parts[idx].embedding = emb

    # Text parts are embedded individually from their already-encoded ids.
    for p in parts:
        if not p.is_image:
            p.embedding = self.pipeline.embed_tokens(p.input_ids)

    return parts
|
156 |
+
|
157 |
+
def _remove_old_images(self, parts: List[PromptPart], params: dict) -> List[PromptPart]:
    """Keep only the newest image's real embedding; older images get placeholders."""
    if params['add_all_images_to_prompt']:
        return parts

    # Walk from the end so the most recent image is found first and kept intact.
    newest_seen = False
    for part in reversed(parts):
        if not part.is_image:
            continue
        if newest_seen:
            part.embedding = self.pipeline.placeholder_embeddings()
        else:
            newest_seen = True
    return parts
|
168 |
+
|
169 |
+
def forward(self, prompt: str, state: Any, params: dict):
    """Full pipeline: split -> encode -> embed -> drop old images -> concatenate.

    Returns `(prompt, input_ids, input_embeds, num_images)`.
    """
    parts = self._split_prompt(prompt, True)
    parts = self._encode_text(state, parts)
    parts = self._embed(parts)
    parts = self._remove_old_images(parts, params)

    # Concatenate the per-part tensors along the sequence dimension.
    input_ids = torch.cat(tuple(p.input_ids for p in parts), dim=0)
    input_embeds = torch.cat(tuple(p.embedding for p in parts), dim=0)
    return prompt, input_ids, input_embeds, self._num_images(parts)
|
extensions/multimodal/pipeline_loader.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import traceback
|
2 |
+
from importlib import import_module
|
3 |
+
from pathlib import Path
|
4 |
+
from typing import Tuple
|
5 |
+
|
6 |
+
from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
|
7 |
+
from modules import shared
|
8 |
+
from modules.logging_colors import logger
|
9 |
+
|
10 |
+
|
11 |
+
def _get_available_pipeline_modules():
    """List pipeline subdirectory names that ship a `pipelines.py` entry point."""
    pipelines_dir = Path(__file__).parent / 'pipelines'
    return [
        entry.name
        for entry in pipelines_dir.iterdir()
        if entry.is_dir() and (entry / 'pipelines.py').exists()
    ]
|
15 |
+
|
16 |
+
|
17 |
+
def load_pipeline(params: dict) -> Tuple[AbstractMultimodalPipeline, str]:
    """Load and return `(pipeline, module_name)` for the configured multimodal pipeline.

    If `--multimodal-pipeline` was given, each pipeline module's `get_pipeline` is
    asked for it; otherwise each module's `get_pipeline_from_model_name` tries to
    infer one from the loaded model's name.

    Raises:
        RuntimeError: if no module can provide a matching pipeline.
    """
    pipeline_modules = {}
    for name in _get_available_pipeline_modules():
        try:
            pipeline_modules[name] = import_module(f'extensions.multimodal.pipelines.{name}.pipelines')
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate.
            logger.warning(f'Failed to get multimodal pipelines from {name}')
            logger.warning(traceback.format_exc())

    if shared.args.multimodal_pipeline is not None:
        # A pipeline was requested explicitly on the command line.
        for module_name, module in pipeline_modules.items():
            getter = getattr(module, 'get_pipeline', None)
            if getter is not None:
                pipeline = getter(shared.args.multimodal_pipeline, params)
                if pipeline is not None:
                    return (pipeline, module_name)
    else:
        # No explicit choice: infer from the model name.
        model_name = shared.args.model.lower()
        for module_name, module in pipeline_modules.items():
            getter = getattr(module, 'get_pipeline_from_model_name', None)
            if getter is not None:
                pipeline = getter(model_name, params)
                if pipeline is not None:
                    return (pipeline, module_name)

    # Nothing matched: collect what was available for the error message.
    available = []
    for module in pipeline_modules.values():
        available += getattr(module, 'available_pipelines', [])

    if shared.args.multimodal_pipeline is not None:
        log = f'Multimodal - ERROR: Failed to load multimodal pipeline "{shared.args.multimodal_pipeline}", available pipelines are: {available}.'
    else:
        log = f'Multimodal - ERROR: Failed to determine multimodal pipeline for model {shared.args.model}, please select one manually using --multimodal-pipeline [PIPELINE]. Available pipelines are: {available}.'
    # Build the final message once instead of duplicating the f-string suffix.
    message = f'{log} Please specify a correct pipeline, or disable the extension'
    logger.critical(message)
    raise RuntimeError(message)
|
extensions/multimodal/pipelines/llava/README.md
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
## LLaVA pipeline
|
2 |
+
|
3 |
+
This module provides 2 pipelines:
|
4 |
+
- `llava-7b` - for use with LLaVA v0 7B model (finetuned LLaMa 7B)
|
5 |
+
- `llava-13b` - for use with LLaVA v0 13B model (finetuned LLaMa 13B)
|
6 |
+
|
7 |
+
[LLaVA](https://github.com/haotian-liu/LLaVA) uses CLIP `openai/clip-vit-large-patch14` as the vision model, and then a single linear layer. For 13B the projector weights are in `liuhaotian/LLaVA-13b-delta-v0`, and for 7B they are in `liuhaotian/LLaVA-7b-delta-v0`.
|
8 |
+
|
9 |
+
The supported parameter combinations for both the vision model and the projector are: CUDA/32-bit, CUDA/16-bit, and CPU/32-bit.
|
extensions/multimodal/pipelines/llava/llava.py
ADDED
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
from abc import abstractmethod
|
3 |
+
from typing import List, Tuple
|
4 |
+
|
5 |
+
import torch
|
6 |
+
from huggingface_hub import hf_hub_download
|
7 |
+
from PIL import Image
|
8 |
+
from transformers import CLIPImageProcessor, CLIPVisionModel
|
9 |
+
|
10 |
+
from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
|
11 |
+
from modules import shared
|
12 |
+
from modules.logging_colors import logger
|
13 |
+
from modules.text_generation import encode
|
14 |
+
|
15 |
+
|
16 |
+
def expand2square(pil_img: Image.Image, background_color: Tuple[int]) -> Image.Image:
    """Pad `pil_img` with `background_color` into a centered square.

    An already-square image is returned unchanged (same object, no copy).
    """
    width, height = pil_img.size
    if width == height:
        return pil_img

    # New canvas is a square whose side is the longer edge; the original is
    # pasted centered along the shorter axis.
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    canvas.paste(pil_img, ((side - width) // 2, (side - height) // 2))
    return canvas
|
28 |
+
|
29 |
+
|
30 |
+
class LLaVA_v0_Pipeline(AbstractMultimodalPipeline):
    """Base LLaVA v0 pipeline: CLIP vision tower + a linear (or MLP) projector.

    Subclasses supply the projector repo/filename/shape and the placeholder
    token id for their specific checkpoint size.
    """

    CLIP_REPO = "openai/clip-vit-large-patch14"

    def __init__(self, params: dict) -> None:
        super().__init__()
        # Device/dtype for CLIP and the projector come from extension params.
        self.clip_device = self._get_device("vision_device", params)
        self.clip_dtype = self._get_dtype("vision_bits", params)
        self.projector_device = self._get_device("projector_device", params)
        self.projector_dtype = self._get_dtype("projector_bits", params)
        self.image_processor, self.vision_tower, self.mm_projector = self._load_models()

    def _load_models(self):
        """Load the CLIP processor/vision tower and the projector weights from HF Hub."""
        start_ts = time.time()

        logger.info(f"LLaVA - Loading CLIP from {self.CLIP_REPO} as {self.clip_dtype} on {self.clip_device}...")
        image_processor = CLIPImageProcessor.from_pretrained(self.CLIP_REPO, torch_dtype=self.clip_dtype)
        vision_tower = CLIPVisionModel.from_pretrained(self.CLIP_REPO, torch_dtype=self.clip_dtype).to(self.clip_device)

        logger.info(f"LLaVA - Loading projector from {self.llava_projector_repo()} as {self.projector_dtype} on {self.projector_device}...")
        projector_path = hf_hub_download(self.llava_projector_repo(), self.llava_projector_filename())
        mm_projector = self.build_mm_projector()
        # map_location='cpu' so checkpoints saved from a GPU load on CPU-only hosts;
        # the projector is moved to its target device right after load_state_dict.
        projector_data = torch.load(projector_path, map_location='cpu')
        # Keep only projector weights, stripping the 'model.mm_projector.' prefix (19 chars).
        projector_data = {k[19:]: v for k, v in projector_data.items() if k.startswith('model.mm_projector.')}
        mm_projector.load_state_dict(projector_data)
        mm_projector = mm_projector.to(self.projector_device)

        logger.info(f"LLaVA supporting models loaded, took {time.time() - start_ts:.2f} seconds")
        return image_processor, vision_tower, mm_projector

    def build_mm_projector(self) -> torch.nn.Module:
        """Build the projector: a single Linear for a 2-dim shape, else a GELU MLP."""
        projector_shape = self.llava_projector_shape()
        if len(projector_shape) == 2:
            return torch.nn.Linear(*projector_shape)
        else:
            modules = []
            modules.append(torch.nn.Linear(projector_shape[0], projector_shape[1]))
            for i in range(2, len(projector_shape)):
                modules.append(torch.nn.GELU())
                modules.append(torch.nn.Linear(projector_shape[i - 1], projector_shape[i]))
            return torch.nn.Sequential(*modules)

    @staticmethod
    def image_start() -> str:
        return "<im_start>"

    @staticmethod
    def image_end() -> str:
        return "<im_end>"

    @staticmethod
    def num_image_embeds() -> int:
        return 256

    @staticmethod
    def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids with the LLM's embedding layer, wherever it is nested.

        Raises:
            ValueError: if no `embed_tokens` attribute is found at any nesting level.
        """
        # The original used getattr(shared.model, 'model.model'), but getattr does
        # not traverse dotted paths, so the nested lookups could never succeed.
        # Walk each dotted path attribute-by-attribute instead.
        for attr in ['', 'model', 'model.model', 'model.model.model']:
            tmp = shared.model
            try:
                for name in filter(None, attr.split('.')):
                    tmp = getattr(tmp, name)
            except AttributeError:
                continue
            if hasattr(tmp, 'embed_tokens'):
                func = tmp.embed_tokens
                break
        else:
            raise ValueError('The embed_tokens method has not been found for this loader.')

        return func(input_ids).to(shared.model.device, dtype=shared.model.dtype)

    @staticmethod
    def placeholder_embeddings() -> torch.Tensor:
        """Embeddings of 256 `<im_patch>` tokens, used for dropped images."""
        return LLaVA_v0_Pipeline.embed_tokens(encode("<im_patch>" * 256, add_bos_token=False)[0])

    def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
        """Encode PIL images into projected embeddings in the LLM's space."""
        images = self.image_processor(images, return_tensors='pt')['pixel_values']
        images = images.to(self.clip_device, dtype=self.clip_dtype)

        with torch.no_grad():
            image_forward_outs = self.vision_tower(images, output_hidden_states=True)
            # LLaVA takes the penultimate hidden layer and drops the CLS token ([:, 1:]).
            select_hidden_state_layer = -2
            select_hidden_state = image_forward_outs.hidden_states[select_hidden_state_layer]
            image_features = select_hidden_state[:, 1:].to(self.projector_device, dtype=self.projector_dtype)
            image_features = self.mm_projector(image_features)
        return image_features.to(shared.model.device, dtype=shared.model.dtype)

    @staticmethod
    @abstractmethod
    def llava_projector_repo() -> str:
        pass

    @staticmethod
    @abstractmethod
    def llava_projector_filename() -> str:
        pass

    @staticmethod
    @abstractmethod
    def llava_projector_shape() -> Tuple[int, ...]:
        # Widened from Tuple[int, int]: v1.5 subclasses return 3-element shapes.
        pass
|
125 |
+
|
126 |
+
|
127 |
+
class LLaVA_v0_13B_Pipeline(LLaVA_v0_Pipeline):
    """LLaVA v0 pipeline configured for the 13B checkpoint."""

    def __init__(self, params: dict) -> None:
        super().__init__(params)

    @staticmethod
    def name() -> str:
        return "llava-13b"

    @staticmethod
    def placeholder_token_id() -> int:
        return 32000

    @staticmethod
    def llava_projector_repo() -> str:
        return "liuhaotian/LLaVA-13b-delta-v0"

    @staticmethod
    def llava_projector_filename() -> str:
        return "mm_projector.bin"

    @staticmethod
    def llava_projector_shape() -> Tuple[int, int]:
        return (1024, 5120)
|
150 |
+
|
151 |
+
|
152 |
+
class LLaVA_v0_7B_Pipeline(LLaVA_v0_Pipeline):
    """LLaVA v0 pipeline configured for the 7B checkpoint."""

    def __init__(self, params: dict) -> None:
        super().__init__(params)

    @staticmethod
    def name() -> str:
        return "llava-7b"

    @staticmethod
    def placeholder_token_id() -> int:
        return 32001

    @staticmethod
    def llava_projector_repo() -> str:
        return "liuhaotian/LLaVA-7b-delta-v0"

    @staticmethod
    def llava_projector_filename() -> str:
        return "mm_projector.bin"

    @staticmethod
    def llava_projector_shape() -> Tuple[int, int]:
        return (1024, 4096)
|
175 |
+
|
176 |
+
|
177 |
+
class LLaVA_LLaMA_2_13B_Pipeline(LLaVA_v0_13B_Pipeline):
    """LLaVA variant fine-tuned from Llama 2 13B chat."""

    def __init__(self, params: dict) -> None:
        super().__init__(params)

    @staticmethod
    def name() -> str:
        return "llava-llama-2-13b"

    @staticmethod
    def placeholder_token_id() -> int:
        return 0

    @staticmethod
    def llava_projector_repo() -> str:
        return "liuhaotian/llava-llama-2-13b-chat-lightning-preview"

    # This variant wraps images with no start/end marker tokens.
    @staticmethod
    def image_start() -> str:
        return ""

    @staticmethod
    def image_end() -> str:
        return ""

    @staticmethod
    def placeholder_embeddings() -> torch.Tensor:
        # 256 <unk> tokens stand in for an image dropped from the prompt.
        return LLaVA_v0_Pipeline.embed_tokens(encode("<unk>" * 256, add_bos_token=False)[0])
|
204 |
+
|
205 |
+
|
206 |
+
class LLaVA_v1_5_13B_Pipeline(LLaVA_v0_13B_Pipeline):
    """LLaVA v1.5 13B: 336px CLIP, 2-layer MLP projector, 576 image embeds."""

    CLIP_REPO = "openai/clip-vit-large-patch14-336"

    def __init__(self, params: dict) -> None:
        super().__init__(params)

    @staticmethod
    def name() -> str:
        return "llava-v1.5-13b"

    @staticmethod
    def llava_projector_shape() -> Tuple[int, ...]:
        # Fixed annotation: this returns a 3-tuple (a Linear-GELU-Linear MLP —
        # see build_mm_projector), not the Tuple[int, int] it previously claimed.
        return (1024, 5120, 5120)

    @staticmethod
    def placeholder_token_id() -> int:
        return 0

    @staticmethod
    def llava_projector_repo() -> str:
        return "liuhaotian/llava-v1.5-13b"

    # v1.5 uses no image start/end marker tokens.
    @staticmethod
    def image_start() -> str:
        return ""

    @staticmethod
    def image_end() -> str:
        return ""

    @staticmethod
    def num_image_embeds() -> int:
        return 576

    def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
        """Pad images to a square with the processor's mean color, then embed as usual."""
        images = [
            expand2square(image, tuple(int(x * 255) for x in self.image_processor.image_mean))
            for image in images
        ]
        return super().embed_images(images)

    @staticmethod
    def placeholder_embeddings() -> torch.Tensor:
        # 576 <unk> tokens stand in for an image dropped from the prompt.
        return LLaVA_v0_Pipeline.embed_tokens(encode("<unk>" * 576, add_bos_token=False)[0])
|
251 |
+
|
252 |
+
class LLaVA_v1_5_7B_Pipeline(LLaVA_v1_5_13B_Pipeline):
    """LLaVA v1.5 7B: same behavior as the 13B variant with 7B projector/repo."""

    @staticmethod
    def name() -> str:
        return "llava-v1.5-7b"

    @staticmethod
    def llava_projector_shape() -> Tuple[int, ...]:
        # Fixed annotation: a 3-tuple (MLP projector), not Tuple[int, int].
        return (1024, 4096, 4096)

    @staticmethod
    def llava_projector_repo() -> str:
        return "liuhaotian/llava-v1.5-7b"
|
extensions/multimodal/pipelines/llava/pipelines.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Optional
|
2 |
+
|
3 |
+
from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
|
4 |
+
|
5 |
+
# Pipeline names this module can serve; each maps to a class in .llava (see get_pipeline).
available_pipelines = ['llava-7b', 'llava-13b', 'llava-llama-2-13b', 'llava-v1.5-13b', 'llava-v1.5-7b']
|
6 |
+
|
7 |
+
|
8 |
+
def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
    """Return an instance of the pipeline registered under `name`, or None if unknown."""
    class_by_name = {
        'llava-7b': 'LLaVA_v0_7B_Pipeline',
        'llava-13b': 'LLaVA_v0_13B_Pipeline',
        'llava-llama-2-13b': 'LLaVA_LLaMA_2_13B_Pipeline',
        'llava-v1.5-7b': 'LLaVA_v1_5_7B_Pipeline',
        'llava-v1.5-13b': 'LLaVA_v1_5_13B_Pipeline',
    }
    class_name = class_by_name.get(name)
    if class_name is None:
        return None
    # Import lazily (only on a match), as the original per-branch imports did.
    from . import llava
    return getattr(llava, class_name)(params)
|
25 |
+
|
26 |
+
|
27 |
+
def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]:
    """Infer a LLaVA pipeline from `model_name`, or None if it doesn't look like LLaVA.

    Matching is case-insensitive. The llama-2 variant only ships as 13b; the
    v1.5 and v0 families come in 7b and 13b flavors.
    """
    # Lowercase once instead of recomputing model_name.lower() on every check.
    name = model_name.lower()
    if 'llava' not in name:
        return None
    if 'llama-2' in name:
        if '13b' in name:
            from .llava import LLaVA_LLaMA_2_13B_Pipeline
            return LLaVA_LLaMA_2_13B_Pipeline(params)
    elif 'llava-v1.5' in name:
        if '13b' in name:
            from .llava import LLaVA_v1_5_13B_Pipeline
            return LLaVA_v1_5_13B_Pipeline(params)
        if '7b' in name:
            from .llava import LLaVA_v1_5_7B_Pipeline
            return LLaVA_v1_5_7B_Pipeline(params)
    else:
        if '7b' in name:
            from .llava import LLaVA_v0_7B_Pipeline
            return LLaVA_v0_7B_Pipeline(params)
        if '13b' in name:
            from .llava import LLaVA_v0_13B_Pipeline
            return LLaVA_v0_13B_Pipeline(params)
    return None
|